Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.notifyNamenodeReceivedBlock() method, with code examples

This article collects a number of Java code examples for org.apache.hadoop.hdfs.server.datanode.DataNode.notifyNamenodeReceivedBlock(), showing how the method is used in practice. The examples are extracted from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of DataNode.notifyNamenodeReceivedBlock() are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: notifyNamenodeReceivedBlock

Introduction to DataNode.notifyNamenodeReceivedBlock

The method notifies the NameNode that this datanode has received (finalized) a block replica, so the new replica becomes visible through an incremental block report instead of waiting for the next full block report. In current Hadoop releases the signature is notifyNamenodeReceivedBlock(ExtendedBlock block, String delHint, String storageUuid, boolean isOnTransientStorage): delHint optionally identifies a replica that has become redundant and may be deleted, storageUuid names the storage volume holding the replica, and isOnTransientStorage flags RAM-backed (lazy-persist) storage. Several of the examples below come from older releases whose signature was notifyNamenodeReceivedBlock(Block block, String delHint), optionally with a leading namespaceId parameter in federated forks.
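
As a rough mental model, the call queues a "block received" event for the block pool's NameNode, and a background service thread later delivers the queued events as an incremental block report. The sketch below illustrates only this idea; it is not the actual Hadoop implementation, and DataNodeSketch and ReceivedBlockEvent are hypothetical names:

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

// Minimal sketch of the idea behind notifyNamenodeReceivedBlock(). This is
// NOT the real Hadoop implementation; DataNodeSketch and ReceivedBlockEvent
// are hypothetical names used only for illustration.
class DataNodeSketch {

 // One pending-event queue per block pool (i.e. per namespace/NameNode).
 private final Map<String, Queue<ReceivedBlockEvent>> pending =
   new ConcurrentHashMap<>();

 /**
  * Record that a replica was received/finalized, so the NameNode learns
  * about it in the next incremental block report rather than waiting for
  * the next full block report.
  */
 void notifyNamenodeReceivedBlock(String blockPoolId, long blockId,
   String delHint, String storageUuid, boolean isTransientStorage) {
  pending.computeIfAbsent(blockPoolId, k -> new ConcurrentLinkedQueue<>())
    .add(new ReceivedBlockEvent(blockId, delHint, storageUuid,
      isTransientStorage));
  // In the real DataNode, a per-NameNode service thread drains such a
  // queue and sends the incremental block report RPC.
 }

 record ReceivedBlockEvent(long blockId, String delHint,
   String storageUuid, boolean isTransientStorage) { }
}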

Code examples

Code example source: origin: org.apache.hadoop/hadoop-hdfs (identical code also ships in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

/**
 * After a block becomes finalized, the datanode increases its metric
 * counter, notifies the namenode, and adds the block to the block scanner
 * @param block block to close
 * @param delHint hint on which excess block to delete
 * @param storageUuid UUID of the storage where block is stored
 */
void closeBlock(ExtendedBlock block, String delHint, String storageUuid,
  boolean isTransientStorage) {
 metrics.incrBlocksWritten();
 notifyNamenodeReceivedBlock(block, delHint, storageUuid,
   isTransientStorage);
}
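
Note that closeBlock() forwards delHint from the write pipeline unchanged; the older examples further below pass an explicit DataNode.EMPTY_DEL_HINT or null instead, since their API predates the storageUuid and isTransientStorage parameters.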

Code example source: origin: org.apache.hadoop/hadoop-hdfs (identical code also ships in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

/**
 * Update replica with the new generation stamp and length.  
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
  final long recoveryId, final long newBlockId, final long newLength)
  throws IOException {
 final Replica r = data.updateReplicaUnderRecovery(oldBlock,
   recoveryId, newBlockId, newLength);
 // Notify the namenode of the updated block info. This is important
 // for HA, since otherwise the standby node may lose track of the
 // block locations until the next block report.
 ExtendedBlock newBlock = new ExtendedBlock(oldBlock);
 newBlock.setGenerationStamp(recoveryId);
 newBlock.setBlockId(newBlockId);
 newBlock.setNumBytes(newLength);
 final String storageID = r.getStorageUuid();
 notifyNamenodeReceivedBlock(newBlock, null, storageID,
   r.isOnTransientStorage());
 return storageID;
}
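
Here delHint is null because block recovery creates no redundant replica; as the inline comment explains, the notification mainly keeps a standby NameNode (in an HA pair) aware of the replica's new block ID, generation stamp, and length ahead of the next full block report.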

Code example source: origin: org.apache.hadoop/hadoop-hdfs

/**
 * Cleans up the old replica and notifies the NN about the new replica.
 *
 * @param replicaInfo    - Old replica to be deleted
 * @param newReplicaInfo - New replica object
 * @param bpid           - block pool id
 */
private void removeOldReplica(ReplicaInfo replicaInfo,
  ReplicaInfo newReplicaInfo, final String bpid) {
 // Before deleting the files from old storage we must notify the
 // NN that the files are on the new storage. Else a blockReport from
 // the transient storage might cause the NN to think the blocks are lost.
 // Replicas must be evicted from client short-circuit caches, because the
 // storage will no longer be same, and thus will require validating
 // checksum.  This also stops a client from holding file descriptors,
 // which would prevent the OS from reclaiming the memory.
 ExtendedBlock extendedBlock =
   new ExtendedBlock(bpid, newReplicaInfo);
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   ExtendedBlockId.fromExtendedBlock(extendedBlock));
 datanode.notifyNamenodeReceivedBlock(
   extendedBlock, null, newReplicaInfo.getStorageUuid(),
   newReplicaInfo.isOnTransientStorage());
 // Remove the old replicas
 cleanupReplica(bpid, replicaInfo);
 // If deletion failed then the directory scanner will cleanup the blocks
 // eventually.
}
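
This path (used when a replica migrates off transient storage) deliberately tells the NameNode about the replica on its new storage before deleting the old files, so that a concurrently processed block report cannot make the block appear lost.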

Code example source: origin: org.apache.hadoop/hadoop-hdfs (identical fragments also ship in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

// Fragment: "r" is the just-finalized replica; delHint optionally
// identifies a replica that has become redundant.
datanode.notifyNamenodeReceivedBlock(
  block, delHint, r.getStorageUuid(), r.isOnTransientStorage());

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

// Fragment: finalize the replica, bump the blocksWritten metric, then
// notify the NameNode (EMPTY_DEL_HINT means no deletion hint).
datanode.data.finalizeBlock(block);
datanode.myMetrics.blocksWritten.inc();
datanode.notifyNamenodeReceivedBlock(block, 
  DataNode.EMPTY_DEL_HINT);
// ... (snippet truncated; client trace logging follows in the original source)

Code example source: origin: com.facebook.hadoop/hadoop-core

/** {@inheritDoc} */
public void updateBlock(int namespaceId, Block oldblock, Block newblock, boolean finalize) throws IOException {
 LOG.info("namespaceId: " + namespaceId 
   + ", oldblock=" + oldblock + "(length=" + oldblock.getNumBytes()
   + "), newblock=" + newblock + "(length=" + newblock.getNumBytes()
   + "), datanode=" + getDatanodeInfo());
 data.updateBlock(namespaceId, oldblock, newblock);
 if (finalize) {
  data.finalizeBlockIfNeeded(namespaceId, newblock);
  myMetrics.blocksWritten.inc();
  notifyNamenodeReceivedBlock(namespaceId, newblock, null);
  LOG.info("Received block " + newblock +
       " of size " + newblock.getNumBytes() +
       " as part of lease recovery.");
 }
}

Code example source: origin: io.fabric8/fabric-hadoop

/** {@inheritDoc} */
public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException {
 LOG.info("oldblock=" + oldblock + "(length=" + oldblock.getNumBytes()
   + "), newblock=" + newblock + "(length=" + newblock.getNumBytes()
   + "), datanode=" + dnRegistration.getName());
 data.updateBlock(oldblock, newblock);
 if (finalize) {
  data.finalizeBlockIfNeeded(newblock);
  myMetrics.incrBlocksWritten();
  notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT);
  LOG.info("Received " + newblock +
       " of size " + newblock.getNumBytes() +
       " as part of lease recovery");
 }
}

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

/** {@inheritDoc} */
public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException {
 LOG.info("oldblock=" + oldblock + "(length=" + oldblock.getNumBytes()
   + "), newblock=" + newblock + "(length=" + newblock.getNumBytes()
   + "), datanode=" + dnRegistration.getName());
 data.updateBlock(oldblock, newblock);
 if (finalize) {
  data.finalizeBlock(newblock);
  myMetrics.blocksWritten.inc(); 
  notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT);
  LOG.info("Received block " + newblock +
       " of size " + newblock.getNumBytes() +
       " as part of lease recovery.");
 }
}

Code example source: origin: com.facebook.hadoop/hadoop-core

// Fragment: sourceID is used as the deletion hint for the NameNode.
datanode.notifyNamenodeReceivedBlock(namespaceId, block, sourceID);

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

// Fragment: the same call in the non-federated API; sourceID again serves
// as the deletion hint.
datanode.notifyNamenodeReceivedBlock(block, sourceID);

Code example source: origin: com.facebook.hadoop/hadoop-core

// Fragment: finalize the replica, bump the metric, then notify the
// NameNode (no deletion hint in this path).
datanode.data.finalizeBlock(namespaceId, block);
datanode.myMetrics.blocksWritten.inc();
datanode.notifyNamenodeReceivedBlock(namespaceId, block, null);
// ... (snippet truncated; client trace logging follows in the original source)

Code example source: origin: io.prestosql.hadoop/hadoop-apache (identical code also ships in ch.cern.hadoop/hadoop-hdfs)

private void removeOldReplica(ReplicaInfo replicaInfo,
  ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
  long blockFileUsed, long metaFileUsed, final String bpid) {
 // Before deleting the files from old storage we must notify the
 // NN that the files are on the new storage. Else a blockReport from
 // the transient storage might cause the NN to think the blocks are lost.
 // Replicas must be evicted from client short-circuit caches, because the
 // storage will no longer be same, and thus will require validating
 // checksum.  This also stops a client from holding file descriptors,
 // which would prevent the OS from reclaiming the memory.
 ExtendedBlock extendedBlock =
   new ExtendedBlock(bpid, newReplicaInfo);
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   ExtendedBlockId.fromExtendedBlock(extendedBlock));
 datanode.notifyNamenodeReceivedBlock(
   extendedBlock, null, newReplicaInfo.getStorageUuid(),
   newReplicaInfo.isOnTransientStorage());
 // Remove the old replicas
 if (blockFile.delete() || !blockFile.exists()) {
  ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, blockFileUsed);
  if (metaFile.delete() || !metaFile.exists()) {
   ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, metaFileUsed);
  }
 }
 // If deletion failed then the directory scanner will cleanup the blocks
 // eventually.
}

Code example source: origin: com.facebook.hadoop/hadoop-core

// Callable from a local block-copy task: copies the block's files locally,
// then notifies the NameNode of the new replica and schedules a block scan.
public Boolean call() throws Exception {
  try {
   if (crossDatanode) {
    data.copyBlockLocal(srcFileSystem, srcBlockFile,
      srcNamespaceId, srcBlock, dstNamespaceId, dstBlock);
   } else {
    data.copyBlockLocal(srcFileSystem,
      data.getBlockFile(srcNamespaceId, srcBlock),
      srcNamespaceId, srcBlock, dstNamespaceId, dstBlock);
   }
   dstBlock.setNumBytes(srcBlock.getNumBytes());
   notifyNamenodeReceivedBlock(dstNamespaceId, dstBlock, null);
   blockScanner.addBlock(dstNamespaceId, dstBlock);
  } catch (Exception e) {
   LOG.warn("Local block copy for src : " + srcBlock.getBlockName()
     + ", dst : " + dstBlock.getBlockName() + " failed", e);
   throw e;
  }
  return true;
 }
}
