Usage of org.apache.hadoop.hive.ql.exec.Utilities.getVectorizedRowBatchCtx() with code examples


This article compiles Java code examples for the org.apache.hadoop.hive.ql.exec.Utilities.getVectorizedRowBatchCtx() method and illustrates how it is used. The examples come from selected open-source projects published on GitHub/Stack Overflow/Maven and should serve as practical references. Details of Utilities.getVectorizedRowBatchCtx() are as follows:

Package path: org.apache.hadoop.hive.ql.exec.Utilities
Class name: Utilities
Method name: getVectorizedRowBatchCtx

About Utilities.getVectorizedRowBatchCtx

The source page gives no description. Judging from the examples below, the method retrieves the VectorizedRowBatchCtx that query planning attached to the job Configuration of a vectorized task; it can return null when no vectorized context is present, so callers typically guard it with Utilities.getIsVectorized(conf) or an explicit null check.
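
A minimal usage sketch distilled from the examples on this page; note that createVectorizedRowBatch() is drawn from the public VectorizedRowBatchCtx API rather than from the snippets below, and RbCtxUsageSketch is a hypothetical class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;

public class RbCtxUsageSketch {
  /** Returns an empty batch laid out for this task, or null when not vectorized. */
  static VectorizedRowBatch createBatchIfVectorized(Configuration conf) {
    if (!Utilities.getIsVectorized(conf)) {
      return null; // the task is not running in vector mode
    }
    VectorizedRowBatchCtx rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
    if (rbCtx == null) {
      return null; // no vectorized context attached to this Configuration
    }
    // Allocate a batch whose column vectors match the context's row schema.
    return rbCtx.createVectorizedRowBatch();
  }
}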

Code examples

Code example origin: apache/hive

public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
  boolean isVectorMode = Utilities.getIsVectorized(conf);
  if (LOG.isDebugEnabled()) {
    LOG.debug(getClass().getSimpleName() + " in "
        + (isVectorMode ? "" : "non-") + "vector mode");
  }
  if (isVectorMode) {
    // Fetch the batch context that the vectorizer attached to this task's conf.
    rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
    int partitionColumnCount = rbCtx.getPartitionColumnCount();
    if (partitionColumnCount > 0) {
      // Materialize this split's partition-column values once, up front.
      partitionValues = new Object[partitionColumnCount];
      VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit) split, partitionValues);
    } else {
      partitionValues = null;
    }
  } else {
    rbCtx = null;
    partitionValues = null;
  }
}
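
For context (this is not part of the quoted constructor): once partitionValues has been filled, vectorized readers typically copy the values into every batch they emit. A minimal sketch assuming the standard VectorizedRowBatchCtx API; the pairing of the two calls is illustrative:

// Create an output batch and populate its partition columns.
VectorizedRowBatch batch = rbCtx.createVectorizedRowBatch();
if (partitionValues != null) {
  // Writes each partition value into its repeating column vector in the batch.
  rbCtx.addPartitionColsToBatch(batch, partitionValues);
}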

Code example origin: apache/drill

public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
  boolean isVectorMode = Utilities.getUseVectorizedInputFileFormat(conf);
  if (LOG.isDebugEnabled()) {
    LOG.debug(getClass().getSimpleName() + " in "
        + (isVectorMode ? "" : "non-") + "vector mode");
  }
  if (isVectorMode) {
    rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
    int partitionColumnCount = rbCtx.getPartitionColumnCount();
    if (partitionColumnCount > 0) {
      partitionValues = new Object[partitionColumnCount];
      VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit) split, partitionValues);
    } else {
      partitionValues = null;
    }
  } else {
    rbCtx = null;
    partitionValues = null;
  }
}

Code example origin: apache/drill

@VisibleForTesting
public VectorizedParquetRecordReader(
    InputSplit inputSplit,
    JobConf conf) {
  try {
    serDeStats = new SerDeStats();
    projectionPusher = new ProjectionPusher();
    initialize(inputSplit, conf);
    colsToInclude = ColumnProjectionUtils.getReadColumnIDs(conf);
    rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  } catch (Throwable e) {
    LOG.error("Failed to create the vectorized reader due to exception " + e);
    throw new RuntimeException(e);
  }
}

Code example origin: apache/drill

public VectorizedParquetRecordReader(
    org.apache.hadoop.mapred.InputSplit oldInputSplit,
    JobConf conf) {
  try {
    serDeStats = new SerDeStats();
    projectionPusher = new ProjectionPusher();
    initialize(getSplit(oldInputSplit, conf), conf);
    colsToInclude = ColumnProjectionUtils.getReadColumnIDs(conf);
    rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
    initPartitionValues((FileSplit) oldInputSplit, conf);
  } catch (Throwable e) {
    LOG.error("Failed to create the vectorized reader due to exception " + e);
    throw new RuntimeException(e);
  }
}

Code example origin: apache/hive

/**
 * {@link VectorizedOrcAcidRowBatchReader} is always used for vectorized reads of acid tables.
 * In some cases it cannot be used from the LLAP IO elevator because
 * {@link RecordReader#getRowNumber()} is not (currently) available there but is required to
 * generate ROW__IDs for "original" files.
 * @param hasDeletes - if there are any deletes that apply to this split
 * todo: HIVE-17944
 */
static boolean canUseLlapForAcid(OrcSplit split, boolean hasDeletes, Configuration conf) {
  if (!split.isOriginal()) {
    return true;
  }
  VectorizedRowBatchCtx rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  if (rbCtx == null) {
    throw new IllegalStateException("Could not create VectorizedRowBatchCtx for " + split.getPath());
  }
  return !needSyntheticRowIds(split.isOriginal(), hasDeletes, areRowIdsProjected(rbCtx));
}

Code example origin: apache/hive

public VectorizedParquetRecordReader(
    org.apache.hadoop.mapred.InputSplit oldInputSplit, JobConf conf,
    FileMetadataCache metadataCache, DataCache dataCache, Configuration cacheConf) {
  try {
    this.metadataCache = metadataCache;
    this.cache = dataCache;
    this.cacheConf = cacheConf;
    serDeStats = new SerDeStats();
    projectionPusher = new ProjectionPusher();
    colsToInclude = ColumnProjectionUtils.getReadColumnIDs(conf);
    // initialize the row-batch context
    jobConf = conf;
    rbCtx = Utilities.getVectorizedRowBatchCtx(jobConf);
    ParquetInputSplit inputSplit = getSplit(oldInputSplit, conf);
    if (inputSplit != null) {
      initialize(inputSplit, conf);
    }
    initPartitionValues((FileSplit) oldInputSplit, conf);
  } catch (Throwable e) {
    LOG.error("Failed to create the vectorized reader due to exception " + e);
    throw new RuntimeException(e);
  }
}

Code example origin: apache/hive

public DruidVectorizedWrapper(DruidQueryRecordReader reader, Configuration jobConf) {
  this.rbCtx = Utilities.getVectorizedRowBatchCtx(jobConf);
  if (rbCtx.getDataColumnNums() != null) {
    projectedColumns = rbCtx.getDataColumnNums();
  } else {
    // case: all the columns are selected
    projectedColumns = new int[rbCtx.getRowColumnTypeInfos().length];
    for (int i = 0; i < projectedColumns.length; i++) {
      projectedColumns[i] = i;
    }
  }
  this.serDe = createAndInitializeSerde(jobConf);
  this.baseReader = Preconditions.checkNotNull(reader);
  // initialize the row parser and row assigner
  try {
    vectorAssignRow.init((StructObjectInspector) serDe.getObjectInspector());
  } catch (HiveException e) {
    throw new RuntimeException(e);
  }
  druidWritable = baseReader.createValue();
  rowBoat = new Object[rbCtx.getDataColumnCount()];
}
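
The projection setup above is a recurring idiom: getDataColumnNums() returns the indices of the projected data columns, or null when every column is selected. A compact equivalent of the null branch; the stream-based form is an illustration, not taken from the quoted project:

// Identity projection over all data columns when nothing is projected explicitly.
int[] projected = rbCtx.getDataColumnNums() != null
    ? rbCtx.getDataColumnNums()
    : java.util.stream.IntStream.range(0, rbCtx.getRowColumnTypeInfos().length).toArray();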

Code example origin: apache/hive

// Constructor fragment; the signature and body are truncated in the source listing.
... Reporter reporter, VectorizedRowBatchCtx rbCtx) throws IOException {
  this(conf, inputSplit, reporter,
      rbCtx == null ? Utilities.getVectorizedRowBatchCtx(conf) : rbCtx, false);
  // …

Code example origin: apache/hive (an identical fragment from org.apache.hive/kafka-handler appeared later on the page and is omitted as a verbatim duplicate)

VectorizedKafkaRecordReader(KafkaInputSplit inputSplit, Configuration jobConf) {
  this.rbCtx = Utilities.getVectorizedRowBatchCtx(jobConf);
  if (rbCtx.getDataColumnNums() != null) {
    projectedColumns = rbCtx.getDataColumnNums();
    // … (remainder of the constructor is truncated in the source listing)

Code example origin: apache/hive

// Method fragment; the signature is truncated in the source listing.
... Configuration conf) throws IOException {
  VectorizedRowBatchCtx vrbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  // …

Code example origin: apache/hive (the same one-line fragment also appeared in two apache/drill listings, omitted here as verbatim duplicates)

rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
