Usage of the org.apache.hadoop.io.SequenceFile.createWriter() method, with code examples


This article collects Java code examples for the org.apache.hadoop.io.SequenceFile.createWriter() method and shows how it is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of SequenceFile.createWriter():
Package: org.apache.hadoop.io
Class: SequenceFile
Method: createWriter

About SequenceFile.createWriter

Construct the preferred type of 'raw' SequenceFile Writer.
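
Before the project-sourced examples, here is a minimal, self-contained sketch of the method in use. It relies on the options-based overload, createWriter(Configuration, Writer.Option...); the FileSystem-based overloads that appear in several examples below are marked deprecated in their sources. The output path and the key/value types here are illustrative assumptions, not taken from any of the quoted projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileWriteDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example.seq"); // hypothetical output path
        // Build the writer from options: the target file plus the key/value classes.
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(path),
                SequenceFile.Writer.keyClass(Text.class),
                SequenceFile.Writer.valueClass(IntWritable.class))) {
            for (int i = 0; i < 3; i++) {
                writer.append(new Text("key-" + i), new IntWritable(i)); // one record per call
            }
        } // try-with-resources closes and flushes the writer
    }
}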

Code Examples

Code example source: apache/hbase

void writePartitionFile(Configuration conf, Path path) throws IOException {
 FileSystem fs = path.getFileSystem(conf);
 @SuppressWarnings("deprecation")
 SequenceFile.Writer writer = SequenceFile.createWriter(
  fs, conf, path, ImmutableBytesWritable.class, NullWritable.class);
 for (int i = 0; i < partitions.size(); i++) {
  writer.append(partitions.get(i), NullWritable.get());
 }
 writer.close();
}

Code example source: apache/kylin

@SuppressWarnings("deprecation")
public static void writeToSequenceFile(Configuration conf, String outputPath, Map<String, String> counterMap) throws IOException {
  try (SequenceFile.Writer writer = SequenceFile.createWriter(getWorkingFileSystem(conf), conf, new Path(outputPath), Text.class, Text.class)) {
    for (Map.Entry<String, String> counterEntry : counterMap.entrySet()) {
      writer.append(new Text(counterEntry.getKey()), new Text(counterEntry.getValue()));
    }
  }
}

Code example source: pinterest/secor

public SequenceFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
  Configuration config = new Configuration();
  fsPath = new Path(path.getLogFilePath());
  FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
  if (codec != null) {
    this.mWriter = SequenceFile.createWriter(fs, config, fsPath,
        LongWritable.class, BytesWritable.class,
        SequenceFile.CompressionType.BLOCK, codec);
  } else {
    this.mWriter = SequenceFile.createWriter(fs, config, fsPath,
        LongWritable.class, BytesWritable.class);
  }
  this.mKey = new LongWritable();
  this.mValue = new BytesWritable();
  LOG.info("Created sequence file writer: {}", fsPath);
}

Code example source: apache/storm

private static void createSeqFile(FileSystem fs, Path file, int rowCount) throws IOException {
  Configuration conf = new Configuration();
  try {
    if (fs.exists(file)) {
      fs.delete(file, false);
    }
    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class);
    for (int i = 0; i < rowCount; i++) {
      w.append(new IntWritable(i), new Text("line " + i));
    }
    w.close();
    System.out.println("done");
  } catch (IOException e) {
    e.printStackTrace();
  }
}

Code example source: pinterest/secor

public MessagePackSequenceFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
  Configuration config = new Configuration();
  Path fsPath = new Path(path.getLogFilePath());
  FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
  if (codec != null) {
    this.mWriter = SequenceFile.createWriter(fs, config, fsPath,
        BytesWritable.class, BytesWritable.class,
        SequenceFile.CompressionType.BLOCK, codec);
  } else {
    this.mWriter = SequenceFile.createWriter(fs, config, fsPath,
        BytesWritable.class, BytesWritable.class);
  }
  this.mKey = new BytesWritable();
  this.mValue = new BytesWritable();
}

Code example source: apache/kylin

SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
    ImmutableBytesWritable.class, NullWritable.class);
writer.append(startKey, NullWritable.get());
writer.close();

Code example source: apache/kylin

@Override
public void doCleanup(Context context) throws IOException, InterruptedException {
  mos.close();
  Path outputDirBase = new Path(context.getConfiguration().get(FileOutputFormat.OUTDIR), PathNameCuboidBase);
  FileSystem fs = FileSystem.get(context.getConfiguration());
  if (!fs.exists(outputDirBase)) {
    fs.mkdirs(outputDirBase);
    SequenceFile
        .createWriter(context.getConfiguration(),
            SequenceFile.Writer.file(new Path(outputDirBase, "part-m-00000")),
            SequenceFile.Writer.keyClass(Text.class), SequenceFile.Writer.valueClass(Text.class))
        .close();
  }
}

Code example source: apache/kylin

SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(outputFilePath),
    SequenceFile.Writer.keyClass(LongWritable.class), SequenceFile.Writer.valueClass(BytesWritable.class));
try {
    writer.append(new LongWritable(-1), new BytesWritable(Bytes.toBytes(mapperOverlapRatio)));
    writer.append(new LongWritable(-2), new BytesWritable(Bytes.toBytes(mapperNumber)));
    writer.append(new LongWritable(0L), new BytesWritable(Bytes.toBytes(samplingPercentage)));
    // remaining appends elided in the original extract
} finally {
    IOUtils.closeStream(writer);
}

Code example source: apache/hbase

SequenceFile.Writer writer = SequenceFile.createWriter(
    fs, conf, partitionsPath, ImmutableBytesWritable.class,
    NullWritable.class);
writer.append(startKey, NullWritable.get());
writer.close();

Code example source: apache/incubator-gobblin

public void put(String storeName, String tableName, T state) throws IOException {
 String tmpTableName = this.useTmpFileForPut ? TMP_FILE_PREFIX + tableName : tableName;
 Path tmpTablePath = new Path(new Path(this.storeRootDir, storeName), tmpTableName);
 try {
  @SuppressWarnings("deprecation")
  SequenceFile.Writer writer = closer.register(SequenceFile.createWriter(this.fs, this.conf, tmpTablePath,
    Text.class, this.stateClass, SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
  writer.append(new Text(Strings.nullToEmpty(state.getId())), state);
 } catch (Throwable t) {
  throw closer.rethrow(t);
 } finally {
  closer.close();
 }
}

Code example source: apache/hive

public void writePartitionKeys(Path path, JobConf job) throws IOException {
 byte[][] partitionKeys = getPartitionKeys(job.getNumReduceTasks());
 int numPartition = partitionKeys.length + 1;
 if (numPartition != job.getNumReduceTasks()) {
  job.setNumReduceTasks(numPartition);
 }
 FileSystem fs = path.getFileSystem(job);
 SequenceFile.Writer writer = SequenceFile.createWriter(fs, job, path,
   BytesWritable.class, NullWritable.class);
 try {
  for (byte[] pkey : partitionKeys) {
   BytesWritable wrapper = new BytesWritable(pkey);
   writer.append(wrapper, NullWritable.get());
  }
 } finally {
  IOUtils.closeStream(writer);
 }
}

Code example source: apache/incubator-gobblin

public void putAll(String storeName, String tableName, Collection<T> states) throws IOException {
 String tmpTableName = this.useTmpFileForPut ? TMP_FILE_PREFIX + tableName : tableName;
 Path tmpTablePath = new Path(new Path(this.storeRootDir, storeName), tmpTableName);
 try {
  @SuppressWarnings("deprecation")
  SequenceFile.Writer writer = closer.register(SequenceFile.createWriter(this.fs, this.conf, tmpTablePath,
    Text.class, this.stateClass, SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
  for (T state : states) {
   writer.append(new Text(Strings.nullToEmpty(state.getId())), state);
  }
 } catch (Throwable t) {
  throw closer.rethrow(t);
 } finally {
  closer.close();
 }
}

Code example source: apache/storm

@Override
Path createOutputFile() throws IOException {
  Path p = new Path(this.fsUrl + this.fileNameFormat.getPath(),
           this.fileNameFormat.getName(this.rotation, System.currentTimeMillis()));
  this.writer = SequenceFile.createWriter(
    this.hdfsConfig,
    SequenceFile.Writer.file(p),
    SequenceFile.Writer.keyClass(this.format.keyClass()),
    SequenceFile.Writer.valueClass(this.format.valueClass()),
    SequenceFile.Writer.compression(this.compressionType, this.codecFactory.getCodecByName(this.compressionCodec))
  );
  return p;
}

Code example source: org.apache.hadoop/hadoop-common

// Extract from SequenceFile's merge/sort internals: records are appended in
// already-serialized ("raw") form. In the full source, appendRaw runs in a loop
// over the record index p, and further writer options were elided by the extract.
Writer writer = createWriter(conf, Writer.stream(out),
    Writer.keyClass(keyClass), Writer.valueClass(valClass),
    Writer.compression(compressionType, codec));
writer.appendRaw(rawBuffer, keyOffsets[p], keyLengths[p], rawValues[p]);
writer.close();

Code example source: apache/kylin

final Path hfilePartitionFile = new Path(outputFolder, "part-r-00000_hfile");
short regionCount = (short) innerRegionSplits.size();
try (SequenceFile.Writer hfilePartitionWriter = SequenceFile.createWriter(hbaseConf,
        SequenceFile.Writer.file(hfilePartitionFile), SequenceFile.Writer.keyClass(RowKeyWritable.class),
        SequenceFile.Writer.valueClass(NullWritable.class))) {
    // The loop over the split index i is elided in this extract; the original
    // literal 9223372036854775807L is Long.MAX_VALUE.
    hfilePartitionWriter.append(
            new RowKeyWritable(KeyValueUtil.createFirstOnRow(splits.get(i), Long.MAX_VALUE)
                    .createKeyOnly(false).getKey()),
            NullWritable.get());
}

Code example source: apache/hive

private void writeSeqenceFileTest(FileSystem fs, int rowCount, Path file,
  int columnNum, CompressionCodec codec) throws IOException {
 byte[][] columnRandom;
 resetRandomGenerators();
 BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
 columnRandom = new byte[columnNum][];
 for (int i = 0; i < columnNum; i++) {
  BytesRefWritable cu = new BytesRefWritable();
  bytes.set(i, cu);
 }
 // zero length key is not allowed by block compress writer, so we use a byte
 // writable
 ByteWritable key = new ByteWritable();
 SequenceFile.Writer seqWriter = SequenceFile.createWriter(fs, conf, file,
   ByteWritable.class, BytesRefArrayWritable.class, CompressionType.BLOCK,
   codec);
 for (int i = 0; i < rowCount; i++) {
  nextRandomRow(columnRandom, bytes);
  seqWriter.append(key, bytes);
 }
 seqWriter.close();
}

Code example source: org.apache.hadoop/hadoop-common

// Truncated extract from MapFile.Writer: it creates one SequenceFile writer for
// the data file and one for the index; the option arrays were cut off in the
// extract and are reconstructed here from the surrounding Hadoop source.
if (!fs.mkdirs(dirName)) {
  throw new IOException("Mkdirs failed to create directory " + dirName);
}
Path dataFile = new Path(dirName, DATA_FILE_NAME);
Path indexFile = new Path(dirName, INDEX_FILE_NAME);
SequenceFile.Writer.Option[] dataOptions = Options.prependOptions(opts,
    SequenceFile.Writer.file(dataFile), SequenceFile.Writer.keyClass(keyClass));
this.data = SequenceFile.createWriter(conf, dataOptions);
SequenceFile.Writer.Option[] indexOptions = Options.prependOptions(opts,
    SequenceFile.Writer.file(indexFile), SequenceFile.Writer.keyClass(keyClass),
    SequenceFile.Writer.valueClass(LongWritable.class),
    SequenceFile.Writer.compression(CompressionType.BLOCK));
this.index = SequenceFile.createWriter(conf, indexOptions);

Code example source: apache/phoenix

SequenceFile.Writer writer = SequenceFile.createWriter(
    fs, conf, partitionsPath, TableRowkeyPair.class,
    NullWritable.class);
writer.append(startKey, NullWritable.get());
writer.close();
