Usage of the org.apache.hadoop.hive.ql.io.orc.Reader.hasMetadataValue() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.io.orc.Reader.hasMetadataValue method and shows how Reader.hasMetadataValue is used in practice. The examples were extracted from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Reader.hasMetadataValue method:
Package path: org.apache.hadoop.hive.ql.io.orc.Reader
Class name: Reader
Method name: hasMetadataValue

About Reader.hasMetadataValue

Did the user set the given metadata value? In other words, the method reports whether the writer attached a user-metadata entry with the given key to the ORC file.
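
Before the project examples, here is a minimal standalone sketch of the typical hasMetadataValue/getMetadataValue pairing. The file path "/tmp/example.orc" and the key "my.custom.key" are placeholders chosen for illustration, not values taken from any of the projects below.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.Reader;

public class HasMetadataValueSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "/tmp/example.orc" and "my.custom.key" are hypothetical placeholders
    Reader reader = OrcFile.createReader(new Path("/tmp/example.orc"),
        OrcFile.readerOptions(conf));
    if (reader.hasMetadataValue("my.custom.key")) {
      // the key was set by the writer; read the associated value
      ByteBuffer val = reader.getMetadataValue("my.custom.key").duplicate();
      byte[] bytes = new byte[val.remaining()];
      val.get(bytes);
      System.out.println("my.custom.key = " + new String(bytes, StandardCharsets.UTF_8));
    } else {
      System.out.println("my.custom.key was not set by the writer");
    }
  }
}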

Code examples

Code example source: apache/hive

public static boolean isOriginal(Reader file) {
 return !file.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME);
}

Code example source: apache/drill

public static boolean isOriginal(Reader file) {
 return !file.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME);
}

Code example source: apache/hive

static RecordIdentifier[] parseKeyIndex(Reader reader) {
 String[] stripes;
 try {
  if (!reader.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)) {
   return null;
  }
  ByteBuffer val =
    reader.getMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)
      .duplicate();
  stripes = utf8Decoder.decode(val).toString().split(";");
 } catch (CharacterCodingException e) {
  throw new IllegalArgumentException("Bad string encoding for " +
    OrcRecordUpdater.ACID_KEY_INDEX_NAME, e);
 }
 RecordIdentifier[] result = new RecordIdentifier[stripes.length];
 for(int i=0; i < stripes.length; ++i) {
  if (stripes[i].length() != 0) {
   String[] parts = stripes[i].split(",");
   result[i] = new RecordIdentifier();
   result[i].setValues(Long.parseLong(parts[0]),
     Integer.parseInt(parts[1]), Long.parseLong(parts[2]));
  }
 }
 return result;
}

Code example source: apache/hive

private static boolean needsCompaction(FileStatus bucket, FileSystem fs) throws IOException {
 //create reader, look at footer
 //no need to check side file since it can only be in a streaming ingest delta
 Reader orcReader = OrcFile.createReader(bucket.getPath(), OrcFile.readerOptions(fs.getConf()).filesystem(fs));
 if (orcReader.hasMetadataValue(ACID_STATS)) {
  try {
   ByteBuffer val = orcReader.getMetadataValue(ACID_STATS).duplicate();
   String acidStats = utf8Decoder.decode(val).toString();
   String[] parts = acidStats.split(",");
   long updates = Long.parseLong(parts[1]);
   long deletes = Long.parseLong(parts[2]);
   return deletes > 0 || updates > 0;
  } catch (CharacterCodingException e) {
   throw new IllegalArgumentException("Bad string encoding for " + ACID_STATS, e);
  }
 } else {
  throw new IllegalStateException("AcidStats missing in " + bucket.getPath());
 }
}

Code example source: apache/hive

/**
 * This is smart enough to handle streaming ingest where there could be a
 * {@link OrcAcidUtils#DELTA_SIDE_FILE_SUFFIX} side file.
 * @param dataFile - ORC acid data file
 * @return version property from file if there,
 *          {@link #ORC_ACID_VERSION_DEFAULT} otherwise
 */
@VisibleForTesting
public static int getAcidVersionFromDataFile(Path dataFile, FileSystem fs) throws IOException {
 FileStatus fileStatus = fs.getFileStatus(dataFile);
 Reader orcReader = OrcFile.createReader(dataFile,
   OrcFile.readerOptions(fs.getConf())
     .filesystem(fs)
     //make sure to check for side file in case streaming ingest died
     .maxLength(getLogicalLength(fs, fileStatus)));
 if (orcReader.hasMetadataValue(ACID_VERSION_KEY)) {
  char[] versionChar = UTF8.decode(orcReader.getMetadataValue(ACID_VERSION_KEY)).array();
  String version = new String(versionChar);
  return Integer.valueOf(version);
 }
 return ORC_ACID_VERSION_DEFAULT;
}

Code example source: apache/hive

Mockito.when(recordReader.next(row3)).thenReturn(row5);
Mockito.when(reader.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME))
  .thenReturn(true);
Mockito.when(reader.getMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME))

Code example source: com.facebook.presto.hive/hive-apache

public static boolean isOriginal(Reader file) {
 return !file.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME);
}

Code example source: com.hotels/corc-core

private boolean isAtomic(Reader orcReader) {
 // Use org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.isOriginal(Reader) from hive-exec:1.1.0
 boolean atomic = orcReader.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME);
 LOG.debug("Atomic ORCFile: {}", atomic);
 return atomic;
}

Code example source: com.facebook.presto/presto-raptor

try (Closer<RecordReader, IOException> recordReader = closer(reader.rows(), RecordReader::close);
    Closer<Writer, IOException> writer = closer(createWriter(path(output), writerOptions), Writer::close)) {
  if (reader.hasMetadataValue(OrcFileMetadata.KEY)) {
    ByteBuffer orcFileMetadata = reader.getMetadataValue(OrcFileMetadata.KEY);
    writer.get().addUserMetadata(OrcFileMetadata.KEY, orcFileMetadata);

Code example source: prestosql/presto

try (Closer<RecordReader, IOException> recordReader = closer(reader.rows(), RecordReader::close);
    Closer<Writer, IOException> writer = closer(createWriter(path(output), writerOptions), Writer::close)) {
  if (reader.hasMetadataValue(OrcFileMetadata.KEY)) {
    ByteBuffer orcFileMetadata = reader.getMetadataValue(OrcFileMetadata.KEY);
    writer.get().addUserMetadata(OrcFileMetadata.KEY, orcFileMetadata);

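The presto-raptor and prestosql examples above copy an existing user-metadata entry into a new file with Writer.addUserMetadata. For completeness, here is a minimal sketch of how such an entry is written in the first place, which is what makes hasMetadataValue return true on the read side. The path, key, value, and the single-string ObjectInspector schema are placeholders for illustration only.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.Writer;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class AddUserMetadataSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // a trivial single-string schema, purely for illustration
    ObjectInspector inspector = PrimitiveObjectInspectorFactory.javaStringObjectInspector;
    Writer writer = OrcFile.createWriter(new Path("/tmp/example.orc"),
        OrcFile.writerOptions(conf).inspector(inspector));
    writer.addRow("some row");
    // the key written here is what Reader.hasMetadataValue() later checks for
    writer.addUserMetadata("my.custom.key",
        ByteBuffer.wrap("my value".getBytes(StandardCharsets.UTF_8)));
    writer.close();
  }
}

After this file is closed, the read-side sketch near the top of the article (or any of the project examples) would see hasMetadataValue("my.custom.key") return true.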