Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.now() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.now() method and shows how DataNode.now() is used in practice. The examples are drawn from selected open-source projects published on GitHub, Stack Overflow and Maven, so they are reasonably representative and can serve as a reference. Details of DataNode.now() are as follows:
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: now

About DataNode.now

Javadoc: Current system time.
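
In the Hadoop versions these examples come from, DataNode.now() is a small static helper that returns the current system time in milliseconds (typically just System.currentTimeMillis()), and callers use it both to timestamp events and to measure elapsed time. Below is a minimal sketch of that pattern, under the assumption that now() delegates to System.currentTimeMillis(); the class and member names are illustrative only, not part of the real DataNode.

// Minimal sketch of the now()-based timing pattern used throughout the examples below.
// Assumes now() simply returns System.currentTimeMillis(); TimingSketch and its
// members are illustrative names, not actual DataNode internals.
public class TimingSketch {

  /** Current system time in milliseconds, mirroring what DataNode.now() returns. */
  static long now() {
    return System.currentTimeMillis();
  }

  public static void main(String[] args) throws InterruptedException {
    long startTime = now();            // timestamp before the operation
    Thread.sleep(50);                  // stand-in for an RPC such as a heartbeat or block report
    long elapsed = now() - startTime;  // the value the real code feeds into myMetrics
    System.out.println("operation took " + elapsed + " ms");
  }
}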

Code examples

Code example source: com.facebook.hadoop/hadoop-core

// Keep-alive task: ping the namenode and record when this namespace thread was last alive.
public void run() {
  try {
   namenode.keepAlive(dnRegistration);
   ns.lastBeingAlive = now();
   LOG.debug("Sent heartbeat at " + ns.lastBeingAlive);
  } catch (Throwable ex) {
   LOG.error("Error sending keepAlive to the namenode", ex);
  }
 }
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * @return true if any namespace thread has heartbeat with namenode recently
 */
public boolean isDataNodeBeingAlive() {
 for (NamespaceService nsos: namespaceManager.getAllNamenodeThreads()) {
  if (nsos != null && 
    nsos.lastBeingAlive >= now() - heartbeatExpireInterval) {
   return true;
  }
 }
 return false;
}
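
Taken together, the two excerpts above show the liveness handshake: the keep-alive thread stamps lastBeingAlive with now() after every successful RPC, and isDataNodeBeingAlive() treats a namespace as alive while that stamp is no older than heartbeatExpireInterval. The sketch below is a minimal, self-contained illustration of the same idea; HeartbeatTracker and its fields are illustrative names, not part of the DataNode API.

// Minimal liveness-tracking sketch mirroring the lastBeingAlive / heartbeatExpireInterval
// pattern above. HeartbeatTracker and its members are illustrative names only.
class HeartbeatTracker {
  private final long heartbeatExpireIntervalMs;
  private volatile long lastBeingAlive;            // last successful heartbeat, in ms

  HeartbeatTracker(long heartbeatExpireIntervalMs) {
    this.heartbeatExpireIntervalMs = heartbeatExpireIntervalMs;
  }

  /** Called after each successful keep-alive RPC, like run() above. */
  void recordHeartbeat() {
    lastBeingAlive = System.currentTimeMillis();   // what now() returns
  }

  /** True while the last heartbeat is recent enough, like isDataNodeBeingAlive(). */
  boolean isBeingAlive() {
    return lastBeingAlive >= System.currentTimeMillis() - heartbeatExpireIntervalMs;
  }
}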

Code example source: com.facebook.hadoop/hadoop-core

// Excerpt from the heartbeat/block-report loop: now() brackets each RPC so the
// elapsed time can be recorded in myMetrics.
long startTime = now();
// … (the opening arguments of the heartbeat RPC are elided in this excerpt)
                        xmitsInProgress.get(),
                        getXceiverCount());
 this.lastBeingAlive = now();
 LOG.debug("Sent heartbeat at " + this.lastBeingAlive);
 myMetrics.heartbeats.inc(now() - startTime);
 long brStartTime = now();
 Block[] bReport = data.getBlockReport(namespaceId);
 // … (the block-report RPC that takes the argument below is elided in this excerpt)
     new BlockReport(BlockListAsLongs.convertToArrayLongs(bReport)));
 firstBlockReportSent = true;
 long brTime = now() - brStartTime;
 myMetrics.blockReports.inc(brTime);
 LOG.info("BlockReport of " + bReport.length +
  // … (the rest of the log statement and surrounding code are elided in this excerpt)
  lastBlockReport += (now() - lastBlockReport) /
            blockReportInterval * blockReportInterval;
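
The last statement of this excerpt (it also appears in the later heartbeat excerpts) is how the block-report scheduler avoids drift: integer division truncates, so lastBlockReport is advanced by the largest whole number of blockReportInterval periods that have already elapsed, keeping future reports aligned with the original schedule. A small worked example with made-up numbers:

// Worked example of the schedule-snapping arithmetic above (all values are made up).
public class BlockReportScheduleExample {
  public static void main(String[] args) {
    long blockReportInterval = 3_600_000L;   // 1 hour in ms, hypothetical
    long lastBlockReport     = 10_000_000L;  // when the previous report was sent
    long currentTime         = 17_500_000L;  // pretend result of now()

    // Integer division truncates: (17_500_000 - 10_000_000) / 3_600_000 == 2,
    // so lastBlockReport advances by exactly two whole intervals (7_200_000 ms).
    lastBlockReport += (currentTime - lastBlockReport) / blockReportInterval * blockReportInterval;

    System.out.println(lastBlockReport);  // prints 17200000: aligned to the schedule, no drift
  }
}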

Code example source: org.jvnet.hudson.hadoop/hadoop-core

// Op-code dispatch in the data transfer path: each handler is timed with
// DataNode.now() and the elapsed time is added to the corresponding metric.
long startTime = DataNode.now();
switch ( op ) {
case DataTransferProtocol.OP_READ_BLOCK:
 readBlock( in );
 datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
 if (local)
  datanode.myMetrics.readsFromLocalClient.inc();
 break;
case DataTransferProtocol.OP_WRITE_BLOCK:
 writeBlock( in );
 datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
 if (local)
  datanode.myMetrics.writesFromLocalClient.inc();
 break;
case DataTransferProtocol.OP_READ_METADATA:
 readMetadata( in );
 datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
 break;
case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
 replaceBlock(in);
 datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
 break;
case DataTransferProtocol.OP_COPY_BLOCK:
 copyBlock(in);
 datanode.myMetrics.copyBlockOp.inc(DataNode.now() - startTime);
 break;
case DataTransferProtocol.OP_BLOCK_CHECKSUM: // get the checksum of a block
 getBlockChecksum(in);
 datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime);
 break;
// … (default case and the remainder of the method elided in this excerpt)
Code example source: io.fabric8/fabric-hadoop

// Excerpt from the same loop in the fabric8 packaging: the heartbeat and the
// asynchronous block report are each timed with now().
long startTime = now();
// … (the heartbeat RPC that produces "cmds" is elided in this excerpt)
                        xmitsInProgress.get(),
                        getXceiverCount());
 myMetrics.addHeartBeat(now() - startTime);
 if (!processCommand(cmds))
  continue;
 if (data.isAsyncBlockReportReady()) {
  long brCreateStartTime = now();
  Block[] bReport = data.retrieveAsyncBlockReport();
  long brSendStartTime = now();
  DatanodeCommand cmd = namenode.blockReport(dnRegistration,
      BlockListAsLongs.convertToArrayLongs(bReport));
  long brSendCost = now() - brSendStartTime;
  long brCreateCost = brSendStartTime - brCreateStartTime;
  myMetrics.addBlockReport(brSendCost);
  // … (intervening lines elided in this excerpt)
   lastBlockReport += (now() - lastBlockReport) /
             blockReportInterval * blockReportInterval;

Code example source: org.jvnet.hudson.hadoop/hadoop-core

// Same heartbeat loop in the Hudson-bundled hadoop-core: heartbeat and block-report
// latencies are measured with now() and pushed into myMetrics.
long startTime = now();
// … (the opening arguments of the heartbeat RPC are elided in this excerpt)
                        xmitsInProgress,
                        getXceiverCount());
 myMetrics.heartbeats.inc(now() - startTime);
 long brStartTime = now();
 Block[] bReport = data.getBlockReport();
 DatanodeCommand cmd = namenode.blockReport(dnRegistration,
     BlockListAsLongs.convertToArrayLongs(bReport));
 long brTime = now() - brStartTime;
 myMetrics.blockReports.inc(brTime);
 LOG.info("BlockReport of " + bReport.length +
  // … (the rest of the log statement and surrounding code are elided in this excerpt)
  lastBlockReport += (now() - lastBlockReport) /
            blockReportInterval * blockReportInterval;

Code example source: com.facebook.hadoop/hadoop-core

// The same dispatch in the Facebook Hadoop branch, which adds an
// OP_READ_BLOCK_ACCELERATOR opcode; every handler is timed with DataNode.now().
long startTime = DataNode.now();
switch ( op ) {
case DataTransferProtocol.OP_READ_BLOCK:
 readBlock( in, versionAndOpcode );
 datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
 if (local)
  datanode.myMetrics.readsFromLocalClient.inc();
 break;
case DataTransferProtocol.OP_READ_BLOCK_ACCELERATOR:
 readBlockAccelerator( in );
 datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
 if (local)
  datanode.myMetrics.readsFromLocalClient.inc();
 break;
case DataTransferProtocol.OP_WRITE_BLOCK:
 writeBlock( in, versionAndOpcode );
 datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
 if (local)
  datanode.myMetrics.writesFromLocalClient.inc();
 break;
case DataTransferProtocol.OP_READ_METADATA:
 readMetadata( in );
 datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
 break;
case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
 replaceBlock(in);
 datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
 break;
case DataTransferProtocol.OP_COPY_BLOCK:
 // … (excerpt truncated here)
