Usage of the org.apache.hadoop.hive.ql.exec.Utilities.getMapRedWork() method, with code examples


This article collects a number of Java code examples for org.apache.hadoop.hive.ql.exec.Utilities.getMapRedWork(), showing how the method is used in practice. The examples are drawn from selected projects on GitHub/Stack Overflow/Maven and should serve as useful references. Details of the method are as follows:
Package: org.apache.hadoop.hive.ql.exec
Class: Utilities
Method: getMapRedWork

About Utilities.getMapRedWork

No description is provided upstream. Judging from the examples below, Utilities.getMapRedWork(JobConf) deserializes and returns the MapredWork query plan that the Hive driver previously attached to the job configuration via Utilities.setMapRedWork; Utilities.clearWork removes the serialized plan again.
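
As a minimal sketch of that lifecycle (the class name, the scratch path, and the use of MapredWork's no-argument constructor are illustrative assumptions, not taken from the examples below):

import java.io.File;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.mapred.JobConf;

public class GetMapRedWorkSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    MapredWork plan = new MapredWork();  // empty plan, purely for illustration
    // Hypothetical scratch location for the serialized plan.
    Path scratchDir = new Path(System.getProperty("java.io.tmpdir") + File.separator + "hive-plan-demo");
    Utilities.setMapRedWork(job, plan, scratchDir);      // driver side: publish the plan
    MapredWork restored = Utilities.getMapRedWork(job);  // task side: read it back
    System.out.println("plan restored: " + (restored != null));
    Utilities.clearWork(job);                            // clean up the serialized plan
  }
}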

Code examples

Code example origin: apache/hive

@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
  // Recover the query plan from the job configuration, then delegate the
  // output-spec check to every FileSinkOperator in the plan.
  MapredWork work = Utilities.getMapRedWork(job);
  List<Operator<?>> opList = work.getAllOperators();
  for (Operator<?> op : opList) {
    if (op instanceof FileSinkOperator) {
      ((FileSinkOperator) op).checkOutputSpecs(ignored, job);
    }
  }
}

Code example origin: apache/drill (identical to the apache/hive example above)

Code example origin: apache/hive

if (rowLength == null) {
  LOG.debug("No table property in JobConf. Try to recover the table directly");
  // Fall back to the partition descriptors recorded in the serialized plan.
  Map<String, PartitionDesc> partitionDescMap =
      Utilities.getMapRedWork(job).getMapWork().getAliasToPartnInfo();
  for (String alias : partitionDescMap.keySet()) {
    LOG.debug(format("the current alias: %s", alias));
    rowLength = partitionDescMap.get(alias).getTableDesc().getProperties()
        .getProperty(TD_ROW_LENGTH);
  }
}

Code example origin: apache/hive

// Round trip: serialize the plan to a scratch directory, read it back,
// then remove it from the job configuration.
Utilities.setMapRedWork(job, mrwork, new Path(System.getProperty("java.io.tmpdir") + File.separator
    + System.getProperty("user.name") + File.separator + "hive"));
MapredWork mrwork2 = Utilities.getMapRedWork(job);
Utilities.clearWork(job);

Code example origin: org.apache.hadoop.hive/hive-exec

protected void init(JobConf job) {
  // Cache the deserialized plan and its path-to-partition mapping.
  mrwork = Utilities.getMapRedWork(job);
  pathToPartitionInfo = mrwork.getPathToPartitionInfo();
}

Code example origin: org.apache.parquet/parquet-hive-0.10-binding

/**
 * Initialize the mrwork variable in order to read all the partitions and start updating the JobConf.
 *
 * @param job the job configuration carrying the serialized plan
 */
private void init(final JobConf job) {
  final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
  if (mrwork == null && plan != null && plan.length() > 0) {
    mrwork = Utilities.getMapRedWork(job);
    pathToPartitionInfo.clear();
    // Re-key the mapping by bare path (scheme and authority stripped).
    for (final Map.Entry<String, PartitionDesc> entry : mrwork.getPathToPartitionInfo().entrySet()) {
      pathToPartitionInfo.put(new Path(entry.getKey()).toUri().getPath(), entry.getValue());
    }
  }
}
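
The re-keying step above matters because the plan may record fully qualified URIs while later lookups use the bare path. A small illustration of what the normalization does (the path value is made up):

import org.apache.hadoop.fs.Path;

public class PathKeyDemo {
  public static void main(String[] args) {
    Path planKey = new Path("hdfs://namenode:8020/warehouse/t/part=1"); // hypothetical plan key
    // toUri().getPath() strips scheme and authority, leaving the bare path.
    System.out.println(planKey.toUri().getPath()); // -> /warehouse/t/part=1
  }
}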

Code example origin: com.twitter/parquet-hive-0.10-binding (identical to the org.apache.parquet example above)

Code example origin: com.facebook.presto.hive/hive-apache (identical to the apache/hive checkOutputSpecs example above)

Code example origin: org.apache.hadoop.hive/hive-exec

MapredWork mapRedWork = Utilities.getMapRedWork(job);

Code example origin: com.twitter.elephantbird/elephant-bird-hive

Utilities.getMapRedWork(job).getPathToPartitionInfo();

Code example origin: org.apache.hadoop.hive/hive-exec

// Recover the plan and pull out the reduce-side operator tree.
MapredWork gWork = Utilities.getMapRedWork(job);
reducer = gWork.getReducer();

Code example origin: org.apache.hadoop.hive/hive-exec

execContext.setJc(jc);
// Deserialize the plan and hand it to a fresh MapOperator.
MapredWork mrwork = Utilities.getMapRedWork(job);
mo = new MapOperator();
mo.setConf(mrwork);

Code example origin: org.apache.hadoop.hive/hive-exec

public CombineHiveInputSplit(JobConf job, InputSplitShim inputSplitShim)
    throws IOException {
  this.inputSplitShim = inputSplitShim;
  if (job != null) {
    Map<String, PartitionDesc> pathToPartitionInfo =
        Utilities.getMapRedWork(job).getPathToPartitionInfo();
    // Resolve the input format for the combined split; all chunks share one
    // input format, so the first path is representative.
    Path[] ipaths = inputSplitShim.getPaths();
    if (ipaths.length > 0) {
      PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(
          pathToPartitionInfo, ipaths[0], IOPrepareCache.get().getPartitionDescMap());
      inputFormatClassName = part.getInputFileFormatClass().getName();
    }
  }
}

Code example origin: org.apache.hadoop.hive/hive-exec

/**
 * Writable interface.
 */
public void write(DataOutput out) throws IOException {
  inputSplitShim.write(out);
  if (inputFormatClassName == null) {
    // Lazily resolve the input format class name from the plan before serializing it.
    Map<String, PartitionDesc> pathToPartitionInfo =
        Utilities.getMapRedWork(getJob()).getPathToPartitionInfo();
    PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(pathToPartitionInfo,
        inputSplitShim.getPath(0), IOPrepareCache.get().getPartitionDescMap());
    inputFormatClassName = part.getInputFileFormatClass().getName();
  }
  out.writeUTF(inputFormatClassName);
}
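
For the Writable round trip to work, the matching readFields must consume the fields in the same order they were written. A sketch of that counterpart, assuming nothing beyond the two fields written above (the real hive-exec implementation may differ):

public void readFields(DataInput in) throws IOException {
  inputSplitShim.readFields(in);       // mirrors inputSplitShim.write(out)
  inputFormatClassName = in.readUTF(); // mirrors out.writeUTF(inputFormatClassName)
}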
