SerDe for a simple sequence file in Hive

jjjwad0x · posted 2021-06-03 · in Hadoop

I have a sequence file with Text keys and DoubleWritable values. I load the file as an external table:

CREATE EXTERNAL TABLE t (id String, data Double) STORED AS SEQUENCEFILE LOCATION '/output';

The table is created successfully, but when I run select * against it I get the exception:

"Failed with exception java.io.IOException: org.apache.hadoop.hive.serde2.SerDeException: class org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe: expects either BytesWritable or Text object!"

I understand that the default SerDe is the wrong one for this data. I tried to implement a SerDe for it but could not get it to work. How should I implement a simple SerDe for this file?
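
For reference, a sequence file with this shape can be produced with the classic SequenceFile.Writer API; a minimal sketch, where the class name, path, and sample records are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical sketch: writes Text/DoubleWritable pairs to /output/part-00000.
public class WriteSample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/output/part-00000"); // illustrative path
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, path, Text.class, DoubleWritable.class);
    try {
      writer.append(new Text("id1"), new DoubleWritable(2.5));
      writer.append(new Text("id2"), new DoubleWritable(7.0));
    } finally {
      writer.close();
    }
  }
}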

iih3973s 1#

Solution: after fiddling with the input format a little, I found a fix. The exception occurs because Hive ignores the keys of a sequence file by default and hands only the value to the SerDe, so the DoubleWritable value cannot be matched against the table schema.

I implemented a custom InputFormat. First, a record reader that merges each key/value pair into a single BytesWritable:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.util.ReflectionUtils;

/**
 * Reads key/value pairs from a SequenceFile and exposes each pair to Hive as a
 * single BytesWritable value; subclasses decide how key and value are merged.
 */
public abstract class PSequenceFileKeyRecordReader<K, V> implements RecordReader<K, BytesWritable> {

  private SequenceFile.Reader in;
  private long start;
  private long end;
  private boolean more = true;
  protected Configuration conf;

  public PSequenceFileKeyRecordReader(Configuration conf, FileSplit split)
    throws IOException {
    Path path = split.getPath();
    FileSystem fs = path.getFileSystem(conf);
    this.in = new SequenceFile.Reader(fs, path, conf);
    this.end = split.getStart() + split.getLength();
    this.conf = conf;

    if (split.getStart() > in.getPosition())
      in.sync(split.getStart());                  // sync to start

    this.start = in.getPosition();
    more = start < end;
  }

  public Class<?> getKeyClass() { return in.getKeyClass(); }

  public Class<?> getValueClass() { return in.getValueClass(); }

  @SuppressWarnings("unchecked")
  public K createKey() {
    return (K) ReflectionUtils.newInstance(getKeyClass(), conf);
  }

  public float getProgress() throws IOException {
    if (end == start) {
      return 0.0f;
    } else {
      return Math.min(1.0f, (in.getPosition() - start) / (float)(end - start));
    }
  }

  public synchronized long getPos() throws IOException {
    return in.getPosition();
  }

  protected synchronized void seek(long pos) throws IOException {
    in.seek(pos);
  }
  public synchronized void close() throws IOException { in.close(); }

  @Override
  @SuppressWarnings("unchecked")
  public boolean next(K key, BytesWritable value) throws IOException {
    if (!more) return false;

    long pos = in.getPosition();
    V trueValue = (V) ReflectionUtils.newInstance(in.getValueClass(), conf);
    boolean remaining = in.next((Writable) key, (Writable) trueValue);
    if (remaining) combineKeyValue(key, trueValue, value);
    // Stop after passing the end of the split once a sync marker has been
    // seen, mirroring Hadoop's own SequenceFileRecordReader.
    if (pos >= end && in.syncSeen()) {
      more = false;
    } else {
      more = remaining;
    }
    return more;
  }

  /** Merge the key and the deserialized value into the BytesWritable handed to Hive. */
  protected abstract void combineKeyValue(K key, V trueValue, BytesWritable newValue);
}

The main reader class merges the Text key and the DoubleWritable value into a BytesWritable:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;

public class DoubleTextReader extends PSequenceFileKeyRecordReader<Text, DoubleWritable>{

    public DoubleTextReader(Configuration conf, FileSplit split)
            throws IOException {
        super(conf, split);
    }

    @Override
    protected void combineKeyValue(Text key, DoubleWritable trueValue,
            BytesWritable newValue) {
        // Join the key and the value with \001, Hive's default field
        // delimiter, so that LazySimpleSerDe splits the merged record
        // back into the two declared columns.
        StringBuilder builder = new StringBuilder();
        builder.append(key);
        builder.append('\001');
        builder.append(trueValue.get());
        byte[] bytes = builder.toString().getBytes();
        newValue.set(bytes, 0, bytes.length);
    }

    @Override
    public BytesWritable createValue() {        
        return new BytesWritable();
    }

}
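
To see why this merged layout lets the default SerDe work, here is a hypothetical standalone demo of the combine step (the class name is illustrative): the key and the value are joined by \001, which is exactly the field delimiter LazySimpleSerDe expects by default.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;

public class CombineDemo {
  public static void main(String[] args) {
    Text key = new Text("id1");
    DoubleWritable value = new DoubleWritable(2.5);
    // \001 is LazySimpleSerDe's default field delimiter.
    String merged = key + "\001" + value.get();
    BytesWritable row = new BytesWritable(merged.getBytes(StandardCharsets.UTF_8));
    // Shown with '|' in place of the non-printable delimiter: prints id1|2.5
    System.out.println(
        new String(row.copyBytes(), StandardCharsets.UTF_8).replace('\001', '|'));
  }
}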

And the custom InputFormat class:

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class PSequenceFileKeyInputFormat<K, V> extends FileInputFormat<K, V> {

  public PSequenceFileKeyInputFormat() {
    setMinSplitSize(SequenceFile.SYNC_INTERVAL);
  }

  @Override
  protected FileStatus[] listStatus(JobConf job) throws IOException {
    FileStatus[] files = super.listStatus(job);
    for (int i = 0; i < files.length; i++) {
      FileStatus file = files[i];
      if (file.isDir()) {     // it's a MapFile
        Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
        FileSystem fs = file.getPath().getFileSystem(job);
        // use the data file
        files[i] = fs.getFileStatus(dataFile);
      }
    }
    return files;
  }

  @SuppressWarnings("unchecked")
  public RecordReader<K, V> getRecordReader(InputSplit split,
                                            JobConf job, Reporter reporter)
      throws IOException {

    reporter.setStatus(split.toString());

    // Hand back the reader that merges key and value for Hive.
    return (RecordReader<K, V>) new DoubleTextReader(job, (FileSplit) split);
  }
}
After packaging these classes into a jar and adding it to the Hive session (for example with ADD JAR), the table can be created with:

CREATE EXTERNAL TABLE t (id String, Bytes Double) STORED AS INPUTFORMAT 'PSequenceFileKeyInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' LOCATION '/output';

(If the classes live in a package, the INPUTFORMAT clause needs the fully qualified class name.)
