This article collects code examples of the Java method org.apache.hadoop.hive.ql.exec.Utilities.setMapWork(), showing how Utilities.setMapWork() is used in practice. The examples are drawn mainly from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected projects, so they should serve as useful references. The details of the Utilities.setMapWork() method are as follows:
Package path: org.apache.hadoop.hive.ql.exec.Utilities
Class name: Utilities
Method name: setMapWork
Description: none provided
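Since no description is provided, here is a minimal, hedged sketch of the two-argument overload setMapWork(Configuration, MapWork), modeled on the initialVectorizedRowBatchCtx example collected further below. The examples also show a four-argument form that additionally takes a scratch-directory Path and a boolean flag. The plan path "/tmp" and the round trip through Utilities.getMapWork(conf) are assumptions about a typical Hive setup, not details taken from the collected examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapWork;

public class SetMapWorkSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The examples below point HiveConf.ConfVars.PLAN at a temporary path before
    // registering the plan; "/tmp" here is only an illustrative choice.
    HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "/tmp");

    MapWork mapWork = new MapWork();
    // Two-argument overload: attach the MapWork plan to the job configuration.
    Utilities.setMapWork(conf, mapWork);

    // Assumption: downstream code typically reads the plan back with getMapWork.
    MapWork restored = Utilities.getMapWork(conf);
    System.out.println("Restored map work: " + restored);
  }
}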
Code example source: apache/hive
public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
  String useName = conf.get(INPUT_NAME);
  if (useName == null) {
    useName = "mapreduce:" + hiveScratchDir;
  }
  conf.set(INPUT_NAME, useName);
  // register the map-side plan; the reduce-side plan follows if one exists
  setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
  if (w.getReduceWork() != null) {
    conf.set(INPUT_NAME, useName);
    setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true);
  }
}
Code example source: apache/drill
public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
  String useName = conf.get(INPUT_NAME);
  if (useName == null) {
    useName = "mapreduce";
  }
  conf.set(INPUT_NAME, useName);
  setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
  if (w.getReduceWork() != null) {
    conf.set(INPUT_NAME, useName);
    setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true);
  }
}
Code example source: apache/drill
@Override
public void processRow(Object key, Object value) throws IOException {
  // reset the execContext for each new row
  execContext.resetRow();
  try {
    // Since there is no concept of a group, we don't invoke
    // startGroup/endGroup for a mapper
    mo.process((Writable) value);
    if (isLogInfoEnabled) {
      logMemoryInfo();
    }
  } catch (Throwable e) {
    abort = true;
    // clear the MapWork cached for this job configuration after a failure
    Utilities.setMapWork(jc, null);
    if (e instanceof OutOfMemoryError) {
      // Don't create a new object if we are already out of memory
      throw (OutOfMemoryError) e;
    } else {
      String msg = "Error processing row: " + e;
      LOG.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
}
Code example source: apache/hive
@Override
public void processRow(Object key, Object value) throws IOException {
  if (!anyRow) {
    OperatorUtils.setChildrenCollector(mo.getChildOperators(), oc);
    anyRow = true;
  }
  // reset the execContext for each new row
  execContext.resetRow();
  try {
    // Since there is no concept of a group, we don't invoke
    // startGroup/endGroup for a mapper
    mo.process((Writable) value);
    incrementRowNumber();
  } catch (Throwable e) {
    abort = true;
    Utilities.setMapWork(jc, null);
    if (e instanceof OutOfMemoryError) {
      // Don't create a new object if we are already out of memory
      throw (OutOfMemoryError) e;
    } else {
      String msg = "Error processing row: " + e;
      LOG.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
}
Code example source: apache/drill
scratchDir, context, false);
Utilities.setInputPaths(cloned, inputPaths);
Utilities.setMapWork(cloned, (MapWork) work, scratchDir, false);
Utilities.createTmpDirs(cloned, (MapWork) work);
if (work instanceof MergeFileWork) {
Code example source: apache/hive
Utilities.setMapWork(jconf, mapWork);
Code example source: apache/hive
scratchDir, context, false);
Utilities.setInputPaths(cloned, inputPaths);
Utilities.setMapWork(cloned, mapWork, scratchDir, false);
Utilities.createTmpDirs(cloned, mapWork);
if (work instanceof MergeFileWork) {
Code example source: apache/drill
Utilities.setMapWork(jconf, mapWork);
Code example source: apache/hive
Utilities.setMapWork(jobConf, work);
try {
boolean sendSerializedEvents =
Code example source: apache/drill
Utilities.setMapWork(jobConf, work);
try {
boolean sendSerializedEvents =
Code example source: apache/drill
Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
Code example source: apache/hive
protected static void initialVectorizedRowBatchCtx(Configuration conf) throws HiveException {
  MapWork mapWork = new MapWork();
  VectorizedRowBatchCtx rbCtx = new VectorizedRowBatchCtx();
  rbCtx.init(createStructObjectInspector(conf), new String[0]);
  mapWork.setVectorMode(true);
  mapWork.setVectorizedRowBatchCtx(rbCtx);
  Utilities.setMapWork(conf, mapWork);
}
Code example source: apache/hive
Utilities.setMapWork(jconf, mapWork);
Code example source: apache/hive
Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
Code example source: apache/drill
Utilities.setMapWork(jconf, mapWork);
Code example source: apache/hive
Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true);
Code example source: apache/drill
Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true);
Code example source: apache/hive
mapWork.setVectorizedRowBatchCtx(vrbContext);
HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "//tmp");
Utilities.setMapWork(conf, mapWork);
Code example source: apache/hive
mapWork.setVectorizedRowBatchCtx(vrbContext);
HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "//tmp");
Utilities.setMapWork(conf, mapWork);
Code example source: com.facebook.presto.hive/hive-apache
public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
  String useName = conf.get(INPUT_NAME);
  if (useName == null) {
    useName = "mapreduce";
  }
  conf.set(INPUT_NAME, useName);
  setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
  if (w.getReduceWork() != null) {
    conf.set(INPUT_NAME, useName);
    setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true);
  }
}