Usage and code examples of the org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.<init>() method


This article collects Java code examples of the org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.<init>() method and shows how it is used in practice. The examples come from selected open-source projects found on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as useful references. Details of MultipleOutputs.<init>() are as follows:
Package path: org.apache.hadoop.mapreduce.lib.output.MultipleOutputs
Class name: MultipleOutputs
Method name: <init>

MultipleOutputs.<init> overview

Creates and initializes multiple-outputs support; it should be instantiated in the Mapper/Reducer setup method.
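As a quick illustration of that lifecycle, here is a minimal, self-contained sketch: construct MultipleOutputs in setup(), write through it in map(), and close it in cleanup(). The mapper class, the "errors" named output, and the input assumptions below are hypothetical and are not taken from any of the projects cited later.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

// Hypothetical mapper: routes lines starting with "ERROR" to a named output
// and everything else to the regular map output. Illustrative only.
public class TaggedLinesMapper extends Mapper<LongWritable, Text, Text, Text> {

    private MultipleOutputs<Text, Text> mos;

    @Override
    protected void setup(Context context) {
        // Instantiate in setup(), as the Javadoc recommends.
        mos = new MultipleOutputs<>(context);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        if (value.toString().startsWith("ERROR")) {
            // The "errors" named output must have been registered on the Job with
            // MultipleOutputs.addNamedOutput(job, "errors", TextOutputFormat.class,
            //                                Text.class, Text.class) before submission.
            mos.write("errors", new Text("error"), value);
        } else {
            context.write(new Text("ok"), value);
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Close to flush and close all underlying record writers.
        mos.close();
    }
}

Note that every named output used by write() must first be registered on the Job via MultipleOutputs.addNamedOutput(...), and that skipping close() in cleanup() may result in missing or incomplete output files.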

Code examples

Code example origin: apache/kylin

@Override
protected void doSetup(Context context) throws IOException {
  super.bindCurrentConfiguration(context.getConfiguration());
  mos = new MultipleOutputs(context);
  String cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
  String segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);
  KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
  CubeManager cubeManager = CubeManager.getInstance(config);
  CubeInstance cube = cubeManager.getCube(cubeName);
  CubeSegment optSegment = cube.getSegmentById(segmentID);
  CubeSegment originalSegment = cube.getOriginalSegmentToOptimize(optSegment);
  rowKeySplitter = new RowKeySplitter(originalSegment);
  baseCuboid = cube.getCuboidScheduler().getBaseCuboidId();
  recommendCuboids = cube.getCuboidsRecommend();
  Preconditions.checkNotNull(recommendCuboids, "The recommend cuboid map could not be null");
}

Code example origin: apache/kylin

@Override
protected void doSetup(Context context) throws IOException {
  super.bindCurrentConfiguration(context.getConfiguration());
  mos = new MultipleOutputs(context);
  String cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
  String segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);
  KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
  CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
  CubeSegment cubeSegment = cube.getSegmentById(segmentID);
  CubeSegment oldSegment = cube.getOriginalSegmentToOptimize(cubeSegment);
  cubeDesc = cube.getDescriptor();
  baseCuboid = cube.getCuboidScheduler().getBaseCuboidId();
  rowKeySplitter = new RowKeySplitter(oldSegment);
  rowKeyEncoderProvider = new RowKeyEncoderProvider(cubeSegment);
}

Code example origin: apache/kylin

super.bindCurrentConfiguration(context.getConfiguration());
Configuration conf = context.getConfiguration();
mos = new MultipleOutputs(context);

Code example origin: apache/kylin

@Override
protected void doSetup(Context context) throws IOException {
  super.bindCurrentConfiguration(context.getConfiguration());
  Configuration conf = context.getConfiguration();
  mos = new MultipleOutputs(context);
  KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
  String cubeName = conf.get(BatchConstants.CFG_CUBE_NAME);
  CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
  CubeDesc cubeDesc = cube.getDescriptor();
  List<TblColRef> uhcColumns = cubeDesc.getAllUHCColumns();
  int taskId = context.getTaskAttemptID().getTaskID().getId();
  col = uhcColumns.get(taskId);
  logger.info("column name: " + col.getIdentity());
  if (cube.getDescriptor().getShardByColumns().contains(col)) {
    //for ShardByColumns
    builder = DictionaryGenerator.newDictionaryBuilder(col.getType());
    builder.init(null, 0, null);
  } else {
    //for GlobalDictionaryColumns
    String hdfsDir = conf.get(BatchConstants.CFG_GLOBAL_DICT_BASE_DIR);
    DictionaryInfo dictionaryInfo = new DictionaryInfo(col.getColumnDesc(), col.getDatatype());
    String builderClass = cubeDesc.getDictionaryBuilderClass(col);
    builder = (IDictionaryBuilder) ClassUtil.newInstance(builderClass);
    builder.init(dictionaryInfo, 0, hdfsDir);
  }
}

Code example origin: pl.edu.icm.coansys/coansys-io-input

@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void setup(Context context) {
  mos = new MultipleOutputs(context);
}

Code example origin: openimaj/openimaj

@Override
  protected void setup(Reducer<MAP_OUTPUT_KEY,MAP_OUTPUT_VALUE,OUTPUT_KEY,OUTPUT_VALUE>.Context context) throws IOException ,InterruptedException {
    this.multiOut = new MultipleOutputs<OUTPUT_KEY,OUTPUT_VALUE>(context);
  };
}

Code example origin: org.openimaj/core-hadoop

@Override
  protected void setup(Reducer<MAP_OUTPUT_KEY,MAP_OUTPUT_VALUE,OUTPUT_KEY,OUTPUT_VALUE>.Context context) throws IOException ,InterruptedException {
    this.multiOut = new MultipleOutputs<OUTPUT_KEY,OUTPUT_VALUE>(context);
  };
}

Code example origin: apache/incubator-rya

@Override
public void setup(Context context) {
  mout = new MultipleOutputs<>(context);
}
@Override

Code example origin: hortonworks/hive-testbench

protected void setup(Context context) throws IOException {
 mos = new MultipleOutputs(context);
}
protected void cleanup(Context context) throws IOException, InterruptedException {

Code example origin: cartershanklin/hive-testbench

protected void setup(Context context) throws IOException {
 mos = new MultipleOutputs(context);
}
protected void cleanup(Context context) throws IOException, InterruptedException {

Code example origin: pl.edu.icm.coansys/coansys-io-input

@Override
public void setup(Context context) {
  mos = new MultipleOutputs<>(context);
}

Code example origin: thinkaurelius/faunus

public SafeMapperOutputs(final Mapper.Context context) {
  this.context = context;
  this.outputs = new MultipleOutputs(this.context);
  this.testing = this.context.getConfiguration().getBoolean(FaunusCompiler.TESTING, false);
}

Code example origin: openimaj/openimaj

@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  indexer = VLADIndexerData.read(new File("vlad-data.bin"));
  mos = new MultipleOutputs<Text, BytesWritable>(context);
}

Code example origin: thinkaurelius/faunus

public SafeReducerOutputs(final Reducer.Context context) {
  this.context = context;
  this.outputs = new MultipleOutputs(this.context);
  this.testing = this.context.getConfiguration().getBoolean(FaunusCompiler.TESTING, false);
}

Code example origin: apache/incubator-rya

@Override
public void setup(Context context) {
  Configuration conf = context.getConfiguration();
  debug = MRReasoningUtils.debug(conf);
  if (debug) {
    debugOut = new MultipleOutputs<>(context);
  }
}
@Override

Code example origin: apache/incubator-rya

@Override
protected void setup(Context context) {
  debugOut = new MultipleOutputs<>(context);
  Configuration conf = context.getConfiguration();
  if (schema == null) {
    schema = MRReasoningUtils.loadSchema(context.getConfiguration());
  }
  debug = MRReasoningUtils.debug(conf);
}
@Override

Code example origin: pl.edu.icm.coansys/coansys-io-input

@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  mos = new MultipleOutputs(context);
  mainOutputsDir = context.getConfiguration().get("decided.dir");
  undecidedDir = context.getConfiguration().get("undecided.dir");
}

Code example origin: apache/incubator-rya

@Override
public void setup(Context context) {
  mout = new MultipleOutputs<>(context);
  Configuration conf = context.getConfiguration();
  if (schema == null) {
    schema = MRReasoningUtils.loadSchema(conf);
  }
  debug = MRReasoningUtils.debug(conf);
}
@Override

Code example origin: apache/incubator-rya

@Override
public void setup(Context context) {
  Configuration conf = context.getConfiguration();
  mout = new MultipleOutputs<>(context);
  current = MRReasoningUtils.getCurrentIteration(conf);
  debug = MRReasoningUtils.debug(conf);
}
@Override

Code example origin: apache/incubator-rya

@Override
protected void setup(Context context) {
  schema = new SchemaWritable();
  debug = MRReasoningUtils.debug(context.getConfiguration());
  debugOut = new MultipleOutputs<>(context);
}
