Usage and code examples of org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.getRecordWriter()


This article collects Java code examples showing how the org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.getRecordWriter() method is used. The examples were extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a practical reference. Details of MultipleOutputs.getRecordWriter() are as follows:
Package: org.apache.hadoop.mapreduce.lib.output
Class: MultipleOutputs
Method: getRecordWriter

About MultipleOutputs.getRecordWriter

In the Hadoop source, getRecordWriter(TaskAttemptContext, String baseFileName) is a private helper of MultipleOutputs: it returns the RecordWriter for a given base file name, creating it from the configured OutputFormat on first use and caching it so that later writes to the same base path reuse the same writer. User code never calls it directly; it is reached through the public write(...) overloads shown in the examples below.
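
To show where getRecordWriter() sits in a real job, here is a minimal reducer sketch: the reducer never calls getRecordWriter() itself, but every mos.write(...) call below ends up in it. The class name WordCountReducer and the named output "stats" are illustrative assumptions, not taken from the examples:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

// Illustrative reducer: "stats" must be registered in the driver via
// MultipleOutputs.addNamedOutput(...) before the job is submitted.
public class WordCountReducer
    extends Reducer<Text, IntWritable, Text, IntWritable> {

  private MultipleOutputs<Text, IntWritable> mos;

  @Override
  protected void setup(Context context) {
    mos = new MultipleOutputs<>(context);
  }

  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable v : values) {
      sum += v.get();
    }
    // Resolves a TaskAttemptContext for "stats" and then calls
    // getRecordWriter(taskContext, "stats/part") behind the scenes.
    mos.write("stats", key, new IntWritable(sum), "stats/part");
  }

  @Override
  protected void cleanup(Context context)
      throws IOException, InterruptedException {
    mos.close(); // closes every RecordWriter cached by getRecordWriter()
  }
}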

Code examples

Code example source: com.github.jiayuhan-it/hadoop-mapreduce-client-core (identical code also appears in org.apache.hadoop/hadoop-mapred, io.prestosql.hadoop/hadoop-apache, and ch.cern.hadoop/hadoop-mapreduce-client-core)

/**
 * Write key and value to baseOutputPath using the namedOutput.
 * 
 * @param namedOutput    the named output name
 * @param key            the key
 * @param value          the value
 * @param baseOutputPath base-output path to write the record to.
 * Note: Framework will generate unique filename for the baseOutputPath
 */
@SuppressWarnings("unchecked")
public <K, V> void write(String namedOutput, K key, V value,
  String baseOutputPath) throws IOException, InterruptedException {
 checkNamedOutputName(context, namedOutput, false);
 checkBaseOutputPath(baseOutputPath);
 if (!namedOutputs.contains(namedOutput)) {
  throw new IllegalArgumentException("Undefined named output '" +
   namedOutput + "'");
 }
 TaskAttemptContext taskContext = getContext(namedOutput);
 getRecordWriter(taskContext, baseOutputPath).write(key, value);
}

Code example source: io.hops/hadoop-mapreduce-client-core. The method body is the same as above, but the javadoc adds a warning about base output paths that resolve outside the job output directory:

/**
 * Write key and value to baseOutputPath using the namedOutput.
 * 
 * @param namedOutput    the named output name
 * @param key            the key
 * @param value          the value
 * @param baseOutputPath base-output path to write the record to.
 * Note: Framework will generate unique filename for the baseOutputPath
 * <b>Warning</b>: when the baseOutputPath is a path that resolves
 * outside of the final job output directory, the directory is created
 * immediately and then persists through subsequent task retries, breaking
 * the concept of output committing.
 */
@SuppressWarnings("unchecked")
public <K, V> void write(String namedOutput, K key, V value,
  String baseOutputPath) throws IOException, InterruptedException {
 checkNamedOutputName(context, namedOutput, false);
 checkBaseOutputPath(baseOutputPath);
 if (!namedOutputs.contains(namedOutput)) {
  throw new IllegalArgumentException("Undefined named output '" +
   namedOutput + "'");
 }
 TaskAttemptContext taskContext = getContext(namedOutput);
 getRecordWriter(taskContext, baseOutputPath).write(key, value);
}

Code example source: ch.cern.hadoop/hadoop-mapreduce-client-core (identical code also appears in com.github.jiayuhan-it/hadoop-mapreduce-client-core and io.prestosql.hadoop/hadoop-apache). This overload takes no named output and writes through the job's own output format:

/**
 * Write key value to an output file name.
 * 
 * Gets the record writer from job's output format.  
 * Job's output format should be a FileOutputFormat.
 * 
 * @param key       the key
 * @param value     the value
 * @param baseOutputPath base-output path to write the record to.
 * Note: Framework will generate unique filename for the baseOutputPath
 */
@SuppressWarnings("unchecked")
public void write(KEYOUT key, VALUEOUT value, String baseOutputPath) 
  throws IOException, InterruptedException {
 checkBaseOutputPath(baseOutputPath);
 if (jobOutputFormatContext == null) {
  jobOutputFormatContext = 
   new TaskAttemptContextImpl(context.getConfiguration(), 
                 context.getTaskAttemptID(),
                 new WrappedStatusReporter(context));
 }
 getRecordWriter(jobOutputFormatContext, baseOutputPath).write(key, value);
}

Code example source: org.apache.hadoop/hadoop-mapred. Unlike the previous example, this version constructs a fresh TaskAttemptContextImpl on every call instead of caching it in jobOutputFormatContext:

/**
 * Write key value to an output file name.
 * 
 * Gets the record writer from job's output format.  
 * Job's output format should be a FileOutputFormat.
 * 
 * @param key       the key
 * @param value     the value
 * @param baseOutputPath base-output path to write the record to.
 * Note: Framework will generate unique filename for the baseOutputPath
 */
@SuppressWarnings("unchecked")
public void write(KEYOUT key, VALUEOUT value, String baseOutputPath) 
  throws IOException, InterruptedException {
 checkBaseOutputPath(baseOutputPath);
 TaskAttemptContext taskContext = 
  new TaskAttemptContextImpl(context.getConfiguration(), 
                context.getTaskAttemptID(),
                new WrappedStatusReporter(context));
 getRecordWriter(taskContext, baseOutputPath).write(key, value);
}

Code example source: io.hops/hadoop-mapreduce-client-core. This version combines the cached jobOutputFormatContext with the javadoc warning about paths outside the job output directory:

/**
 * Write key value to an output file name.
 * 
 * Gets the record writer from job's output format.  
 * Job's output format should be a FileOutputFormat.
 * 
 * @param key       the key
 * @param value     the value
 * @param baseOutputPath base-output path to write the record to.
 * Note: Framework will generate unique filename for the baseOutputPath
 * <b>Warning</b>: when the baseOutputPath is a path that resolves
 * outside of the final job output directory, the directory is created
 * immediately and then persists through subsequent task retries, breaking
 * the concept of output committing.
 */
@SuppressWarnings("unchecked")
public void write(KEYOUT key, VALUEOUT value, String baseOutputPath) 
  throws IOException, InterruptedException {
 checkBaseOutputPath(baseOutputPath);
 if (jobOutputFormatContext == null) {
  jobOutputFormatContext = 
   new TaskAttemptContextImpl(context.getConfiguration(), 
                 context.getTaskAttemptID(),
                 new WrappedStatusReporter(context));
 }
 getRecordWriter(jobOutputFormatContext, baseOutputPath).write(key, value);
}
