Usage of the org.apache.hadoop.mapreduce.Counter.increment() method, with code examples


This article collects Java code examples of the org.apache.hadoop.mapreduce.Counter.increment() method and shows how Counter.increment() is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected open-source projects, so they should serve as a useful reference. Details of the Counter.increment() method are as follows:
Package: org.apache.hadoop.mapreduce.Counter
Class: Counter
Method: increment

About Counter.increment

Increment this counter by the given value.
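
Before the collected examples, here is a minimal, self-contained sketch of the typical pattern: a Mapper obtains a Counter from its Context and increments it. The LineCountingMapper class and its LineCounters enum are hypothetical names used only for illustration.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class LineCountingMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

  // Hypothetical application-defined counters; any enum can be used as a counter key.
  public enum LineCounters { TOTAL_LINES, EMPTY_LINES }

  private static final IntWritable ONE = new IntWritable(1);

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // getCounter() resolves (or creates) the counter; increment() adds the given delta.
    context.getCounter(LineCounters.TOTAL_LINES).increment(1);
    if (value.toString().trim().isEmpty()) {
      context.getCounter(LineCounters.EMPTY_LINES).increment(1);
      return;
    }
    context.write(new Text(value.toString().trim()), ONE);
  }
}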

Code Examples

Code example source: apache/hbase

/**
 * Used by TestExportSnapshot to test for retries when failures happen.
 * Failure is injected in {@link #copyFile(Context, SnapshotFileInfo, Path)}.
 */
private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
  throws IOException {
 if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return;
 if (testing.injectedFailureCount >= testing.failuresCountToInject) return;
 testing.injectedFailureCount++;
 context.getCounter(Counter.COPY_FAILED).increment(1);
 LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
 throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s",
   testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
}

Code example source: apache/hbase

/**
  * Maps the data.
  *
  * @param row  The current table row key.
  * @param values  The columns.
  * @param context  The current context.
  * @throws IOException When something is broken with the data.
  * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context)
  */
 @Override
 public void map(ImmutableBytesWritable row, Result values,
  Context context)
 throws IOException {
  // Count every row containing data, whether it's in qualifiers or values
  context.getCounter(Counters.ROWS).increment(1);
 }
}

Code example source: apache/incubator-druid

@Override
 protected void innerMap(
   InputRow inputRow,
   Context context
 ) throws IOException, InterruptedException
 {
  final List<Object> groupKey = Rows.toGroupKey(
    rollupGranularity.bucketStart(inputRow.getTimestamp()).getMillis(),
    inputRow
  );
  context.write(
    new BytesWritable(HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey)),
    NullWritable.get()
  );
  context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);
 }
}

Code example source: apache/hbase

context.getCounter(c).increment(0);
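
A delta of 0 looks odd, but it is a common way to make sure a counter is registered and therefore shows up in the job's counter listing even if it is never actually raised. A minimal sketch of that idea, assuming a hypothetical Counters enum and mapper class:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Identity mapper that only pre-registers its counters; all names are hypothetical.
public class CounterInitializingMapper extends Mapper<LongWritable, Text, LongWritable, Text> {

  public enum Counters { ROWS, BAD_ROWS, EMPTY_ROWS }

  @Override
  protected void setup(Context context) {
    // increment(0) touches each counter so it is created and reported with value 0,
    // instead of being absent from the job output when nothing ever increments it.
    for (Counters c : Counters.values()) {
      context.getCounter(c).increment(0);
    }
  }
}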

Code example source: apache/incubator-druid

private void handleParseException(ParseException pe, Context context)
{
 context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER).increment(1);
 Counter unparseableCounter = context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER);
 Counter processedWithErrorsCounter = context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER);
 if (pe.isFromPartiallyValidRow()) {
  processedWithErrorsCounter.increment(1);
 } else {
  unparseableCounter.increment(1);
 }
 if (config.isLogParseExceptions()) {
  log.error(pe, "Encountered parse exception: ");
 }
 long rowsUnparseable = unparseableCounter.getValue();
 long rowsProcessedWithError = processedWithErrorsCounter.getValue();
 if (rowsUnparseable + rowsProcessedWithError > config.getMaxParseExceptions()) {
  log.error("Max parse exceptions exceeded, terminating task...");
  throw new RuntimeException("Max parse exceptions exceeded, terminating task...", pe);
 }
}
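
The Druid example above calls getValue() inside the running task, which generally reflects only that task's own increments. The aggregated totals are read on the driver side after the job finishes, via Job.getCounters(). A sketch of that driver-side read, assuming a hypothetical counter enum and helper class:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

public final class CounterReport {

  // Hypothetical counter enum used by the job's mapper.
  public enum RowCounters { PROCESSED, INVALID }

  // Prints the final, aggregated values once the job has completed.
  public static void printCounters(Job job) throws Exception {
    Counters counters = job.getCounters();
    Counter processed = counters.findCounter(RowCounters.PROCESSED);
    Counter invalid = counters.findCounter(RowCounters.INVALID);
    System.out.println(processed.getDisplayName() + " = " + processed.getValue());
    System.out.println(invalid.getDisplayName() + " = " + invalid.getValue());
  }
}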

Code example source: apache/hbase

private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) {
 if (sleepMsBeforeReCompare > 0) {
  Threads.sleep(sleepMsBeforeReCompare);
  try {
   Result sourceResult = sourceTable.get(new Get(row.getRow()));
   Result replicatedResult = replicatedTable.get(new Get(row.getRow()));
   Result.compareResults(sourceResult, replicatedResult);
   if (!sourceResult.isEmpty()) {
    context.getCounter(Counters.GOODROWS).increment(1);
    if (verbose) {
     LOG.info("Good row key (with recompare): " + delimiter + Bytes.toStringBinary(row.getRow())
     + delimiter);
    }
   }
   return;
  } catch (Exception e) {
   LOG.error("recompare fail after sleep, rowkey=" + delimiter +
     Bytes.toStringBinary(row.getRow()) + delimiter);
  }
 }
 context.getCounter(counter).increment(1);
 context.getCounter(Counters.BADROWS).increment(1);
 LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) +
   delimiter);
}

Code example source: apache/hbase

if (outputStat != null && sameFile(inputStat, outputStat)) {
  LOG.info("Skip copy " + inputStat.getPath() + " to " + outputPath + ", same file.");
  context.getCounter(Counter.FILES_SKIPPED).increment(1);
  context.getCounter(Counter.BYTES_SKIPPED).increment(inputStat.getLen());
  return;
}
context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());
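
As the snippet above shows, increment() takes an arbitrary long delta, so counters work for byte totals just as well as for simple event counts. A minimal sketch of the same idea, with hypothetical class and counter names:

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ByteAccountingMapper extends Mapper<NullWritable, BytesWritable, Text, NullWritable> {

  // Hypothetical counters tracking record and byte throughput.
  public enum IoCounters { RECORDS, BYTES }

  @Override
  protected void map(NullWritable key, BytesWritable value, Context context)
      throws IOException, InterruptedException {
    context.getCounter(IoCounters.RECORDS).increment(1);
    // increment() accepts any long delta, so byte sizes can be accumulated directly.
    context.getCounter(IoCounters.BYTES).increment(value.getLength());
  }
}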

Code example source: apache/hbase

// Excerpt from the snapshot copy loop: BYTES_COPIED is incremented by the bytes written
// since the last progress report, both periodically inside the loop ...
context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage,
    StringUtils.humanReadableInt(totalBytesWritten),
// ... and once more after the loop, together with the elapsed time and FILES_COPIED:
context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage,
    StringUtils.humanReadableInt(totalBytesWritten),
    " time=" + StringUtils.formatTimeDiff(etime, stime) +
    String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0));
context.getCounter(Counter.FILES_COPIED).increment(1);
} catch (IOException e) {
  LOG.error("Error copying " + inputPath + " to " + outputPath, e);
  context.getCounter(Counter.COPY_FAILED).increment(1);
  throw e;

Code example source: apache/hbase

// Excerpt from a cell-counting mapper: per-row, per-family and per-qualifier tallies
// are tracked both as MapReduce counters and as map output records.
currentFamily = null;
currentQualifier = null;
context.getCounter(Counters.ROWS).increment(1);
context.write(new Text("Total ROWS"), new IntWritable(1));

// On the first cell of a new column family:
currentFamilyName = Bytes.toStringBinary(currentFamily);
currentQualifier = null;
context.getCounter("CF", currentFamilyName).increment(1);
if (1 == context.getCounter("CF", currentFamilyName).getValue()) {
  context.write(new Text("Total Families Across all Rows"), new IntWritable(1));
  context.write(new Text(currentFamily), new IntWritable(1));
}

// Per qualifier-version output and the total cell count for the row:
context.write(new Text(currentRowQualifierName + "_Versions"), new IntWritable(1));
context.getCounter(Counters.CELLS).increment(cellCount);
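
This excerpt (and the Kylin snippet further down) uses the two-argument getCounter(String group, String name) overload, which creates counters dynamically from runtime values such as a column family name. A minimal sketch of that pattern, with hypothetical group, class, and counter names:

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class FamilyCountingMapper extends Mapper<Text, Text, Text, NullWritable> {

  private static final String METRICS_GROUP = "MyJobMetrics";

  @Override
  protected void map(Text family, Text qualifier, Context context)
      throws IOException, InterruptedException {
    // getCounter(group, name) creates the named counter on first use, so one counter
    // per distinct family name accumulates under the "MyJobMetrics" group.
    context.getCounter(METRICS_GROUP, "cf_" + family.toString()).increment(1);
  }
}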

Code example source: apache/hbase

@Override
 protected void map(ImmutableBytesWritable key, Result value,
   Context context) throws IOException,
   InterruptedException {
  context.getCounter(ScanCounter.NUM_ROWS).increment(1);
  context.getCounter(ScanCounter.NUM_CELLS).increment(value.rawCells().length);
 }
}

Code example source: apache/incubator-gobblin

@Override
protected void map(AvroKey<GenericRecord> key, NullWritable value, Context context)
  throws IOException, InterruptedException {
 if (context.getNumReduceTasks() == 0) {
  context.write(key, NullWritable.get());
 } else {
  populateComparableKeyRecord(key.datum(), this.outKey.datum());
  this.outValue.datum(key.datum());
  try {
   context.write(this.outKey, this.outValue);
  } catch (AvroRuntimeException e) {
   final Path[] paths = ((CombineFileSplit) context.getInputSplit()).getPaths();
   throw new IOException("Unable to process paths " + StringUtils.join(paths, ','), e);
  }
 }
 context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(1);
}

Code example source: apache/kylin

// Skipped-record branch: count the skip and log it at a throttled rate ...
context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Skipped records").increment(1L);
if (skipCounter++ % BatchConstants.NORMAL_RECORD_LOG_THRESHOLD == 0) {
  logger.info("Skipping record with ordinal: " + skipCounter);
}

// ... otherwise count the record as processed and emit the rebuilt cuboid key:
context.getCounter(BatchConstants.MAPREDUCE_COUNTER_GROUP_NAME, "Processed records").increment(1L);
result = ndCuboidBuilder.buildKey(parentCuboid, childCuboid, rowKeySplitter.getSplitBuffers());
outputKey.set(result.getSecond().array(), 0, result.getFirst());
context.write(outputKey, value);

Code example source: apache/incubator-druid

// Two fragments from the same mapper: the map output is keyed by the bucket's group key ...
context.write(
    new SortableBytes(
        bucket.get().toGroupKey(),
// ... while parse failures are rethrown and successfully parsed rows bump the processed-row counter:
  throw pe;
} else {
  context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);

Code example source: apache/incubator-druid

.add(hashFunction.hashBytes(HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(groupKey)).asBytes());
context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).increment(1);

Code example source: apache/ignite

/** {@inheritDoc} */
  @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
    super.map(key, val, ctx);
    ctx.getCounter(TestCounter.COUNTER1).increment(1);
  }
}

Code example source: apache/incubator-druid

context.getCounter(COUNTER_GROUP, COUNTER_LOADED).increment(inSize);
final String finalSegmentString = HadoopDruidConverterConfig.jsonMapper.writeValueAsString(finalSegment);
context.getConfiguration().set(ConvertingOutputFormat.PUBLISHED_SEGMENT_KEY, finalSegmentString);
context.write(new Text("dataSegment"), new Text(finalSegmentString));
context.getCounter(COUNTER_GROUP, COUNTER_WRITTEN).increment(finalSegment.getSize());
context.progress();
context.setStatus("Ready To Commit");

Code example source: apache/hbase

/**
 * Finish the currently open hash batch.
 * Compare the target hash to the given source hash.
 * If they do not match, then sync the covered key range.
 */
private void finishBatchAndCompareHashes(Context context)
  throws IOException, InterruptedException {
 targetHasher.finishBatch();
 context.getCounter(Counter.BATCHES).increment(1);
 if (targetHasher.getBatchSize() == 0) {
  context.getCounter(Counter.EMPTY_BATCHES).increment(1);
 }
 ImmutableBytesWritable targetHash = targetHasher.getBatchHash();
 if (targetHash.equals(currentSourceHash)) {
  context.getCounter(Counter.HASHES_MATCHED).increment(1);
 } else {
  context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1);
  ImmutableBytesWritable stopRow = nextSourceKey == null
                   ? new ImmutableBytesWritable(sourceTableHash.stopRow)
                   : nextSourceKey;
  if (LOG.isDebugEnabled()) {
   LOG.debug("Hash mismatch.  Key range: " + toHex(targetHasher.getBatchStartKey())
     + " to " + toHex(stopRow)
     + " sourceHash: " + toHex(currentSourceHash)
     + " targetHash: " + toHex(targetHash));
  }
  syncRange(context, targetHasher.getBatchStartKey(), stopRow);
 }
}
private static String toHex(ImmutableBytesWritable bytes) {

Code example source: apache/hbase

private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo)
  throws IOException {
 try {
  Configuration conf = context.getConfiguration();
  FileLink link = null;
  switch (fileInfo.getType()) {
   case HFILE:
    Path inputPath = new Path(fileInfo.getHfile());
    link = getFileLink(inputPath, conf);
    break;
   case WAL:
    link = new WALLink(inputRoot, fileInfo.getWalServer(), fileInfo.getWalName());
    break;
   default:
    throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
  }
  return link.getFileStatus(inputFs);
 } catch (FileNotFoundException e) {
  context.getCounter(Counter.MISSING_FILES).increment(1);
  LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
  throw e;
 } catch (IOException e) {
  LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
  throw e;
 }
}

Code example source: apache/hbase

@Override
 protected void map(LongWritable key, Text value, final Context context)
     throws IOException, InterruptedException {
  Status status = new Status() {
   @Override
   public void setStatus(String msg) {
     context.setStatus(msg);
   }
  };
  ObjectMapper mapper = new ObjectMapper();
  TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  final Connection con = ConnectionFactory.createConnection(conf);
  AsyncConnection asyncCon = null;
  try {
   asyncCon = ConnectionFactory.createAsyncConnection(conf).get();
  } catch (ExecutionException e) {
   throw new IOException(e);
  }
  // Evaluation task
  RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
  context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
  context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
  context.progress();
 }
}

Code example source: apache/hbase

// Excerpt from a row-comparison routine: counters record cells missing on either side,
// cells with differing values, and matching vs. differing rows.
LOG.debug("Target missing cell: " + sourceCell);
context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
matchingRow = false;

LOG.debug("Source missing cell: " + targetCell);
context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
matchingRow = false;

// Cell values differ between source and target (tail of the value comparison):
      targetCell.getValueOffset(), targetCell.getValueLength()));
context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
matchingRow = false;
context.write(new ImmutableBytesWritable(rowKey), put);
put = null;
context.write(new ImmutableBytesWritable(rowKey), put);

// Row-level summary counters:
context.getCounter(Counter.MATCHINGCELLS).increment(matchingCells);
context.getCounter(Counter.MATCHINGROWS).increment(1);
return true;
} else {
  context.getCounter(Counter.ROWSWITHDIFFS).increment(1);
  return false;
