org.apache.hadoop.hdfs.server.datanode.DataNode.notifyNamenodeDeletedBlock()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(7.9k)|赞(0)|评价(0)|浏览(168)

本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.notifyNamenodeDeletedBlock()方法的一些代码示例,展示了DataNode.notifyNamenodeDeletedBlock()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.notifyNamenodeDeletedBlock()方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:notifyNamenodeDeletedBlock

DataNode.notifyNamenodeDeletedBlock介绍

[英]Notify the corresponding namenode to delete the block.
[中]通知相应的namenode删除该块。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks. Unlike a regular
 * invalidation, the replica's files remain on disk; only client caches,
 * the block cache, and the NameNode's view of the replica are updated.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 // Report the replica as deleted so the NameNode stops counting it on
 // this storage.
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
datanode.data.invalidate(block.getBlockPoolId(),
  new Block[] {block.getLocalBlock()});

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

@Override
 public void run() {
  // Delete (or move to trash) the replica's block and meta files on disk,
  // then update the volume's accounting and, when required, notify the
  // NameNode of the deletion.
  // Capture the file lengths before deletion so the per-volume usage can
  // be decremented afterwards.
  final long blockLength = replicaToDelete.getBlockDataLength();
  final long metaLength = replicaToDelete.getMetadataLength();
  boolean result;
  // When a trash directory is configured, files are moved instead of
  // being deleted outright.
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
   LOG.warn("Unexpected error trying to "
     + (trashDirectory == null ? "delete" : "move")
     + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
     + " at file " + replicaToDelete.getBlockURI() + ". Ignored.");
  } else {
   // A block whose numBytes equals BlockCommand.NO_ACK must not have its
   // deletion acknowledged to the NameNode.
   if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
    datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
   }
   volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
   volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
   LOG.info("Deleted " + block.getBlockPoolId() + " "
     + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI());
  }
  // Record the block id regardless of outcome, then release the reference
  // that pins the volume while this task runs.
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
 }
}

代码示例来源:origin: linkedin/dynamometer

/**
 * Invalidate the given blocks: release their storage, drop them from the
 * block map, and notify the NameNode of each deletion.
 *
 * All blocks are processed even when some are missing; a single
 * IOException is thrown at the end if any block could not be found.
 *
 * @param bpid the block pool ID the blocks belong to.
 * @param invalidBlks the blocks to invalidate; may be null (no-op).
 * @throws IOException if any requested block was not present.
 */
@Override // FsDatasetSpi
public synchronized void invalidate(String bpid, Block[] invalidBlks)
    throws IOException {
  if (invalidBlks == null) {
    return;
  }
  boolean missingBlockSeen = false;
  for (Block blk : invalidBlks) {
    if (blk == null) {
      continue;
    }
    final Map<Block, BInfo> blockMap = getBlockMap(blk, bpid);
    final BInfo info = blockMap.get(blk);
    if (info == null) {
      // Remember the failure but keep processing the remaining blocks.
      missingBlockSeen = true;
      DataNode.LOG.warn("Invalidate: Missing block");
      continue;
    }
    // Release the space accounted to this replica and drop it.
    getStorage(blk).free(bpid, info.getNumBytes());
    blockMap.remove(blk);
    if (datanode != null) {
      // Tell the NameNode this replica no longer exists on its storage.
      datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, blk),
          info.getStorageUuid());
    }
  }
  if (missingBlockSeen) {
    throw new IOException("Invalidate: Missing blocks.");
  }
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

datanode.notifyNamenodeDeletedBlock(namespaceId, invalidBlks[i]);

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

@Override
 public void run() {
  // Delete (or move to trash) the replica's block and meta files, adjust
  // the volume's DFS-used accounting, and notify the NameNode when
  // appropriate.
  // Sum the on-disk sizes before deletion so usage can be decremented.
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;
  // When a trash directory is configured, files are moved, not deleted.
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
   LOG.warn("Unexpected error trying to "
     + (trashDirectory == null ? "delete" : "move")
     + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
     + " at file " + blockFile + ". Ignored.");
  } else {
   // A block whose numBytes equals BlockCommand.NO_ACK must not have its
   // deletion acknowledged to the NameNode.
   if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
    datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
   }
   volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
   LOG.info("Deleted " + block.getBlockPoolId() + " "
     + block.getLocalBlock() + " file " + blockFile);
  }
  // Record the block id regardless of outcome, then release the reference
  // that pins the volume while this task runs.
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Invalidate the given blocks: release their storage, remove them from the
 * block map, and notify the NameNode of each deletion. Missing blocks are
 * logged and reported via a single IOException after all blocks are
 * processed.
 */
@Override // FsDatasetSpi
public synchronized void invalidate(String bpid, Block[] invalidBlks)
  throws IOException {
 // Tracks whether any requested block was absent; the exception is thrown
 // only after every block has been processed.
 boolean error = false;
 if (invalidBlks == null) {
  return;
 }
 final Map<Block, BInfo> map = getMap(bpid);
 for (Block b: invalidBlks) {
  if (b == null) {
   continue;
  }
  BInfo binfo = map.get(b);
  if (binfo == null) {
   error = true;
   DataNode.LOG.warn("Invalidate: Missing block");
   continue;
  }
  // Release the space accounted to this replica and drop it from the map.
  storage.free(bpid, binfo.getNumBytes());
  map.remove(b);
  if (datanode != null) {
   // Tell the NameNode this replica no longer exists on its storage.
   datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
     binfo.getStorageUuid());
  }
 }
 if (error) {
  throw new IOException("Invalidate: Missing blocks.");
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Mark a replica as invalid without removing its on-disk block file.
 *
 * Intended solely for the disk-deactivation path: client short-circuit
 * caches and the block cache are invalidated, and the NameNode is told the
 * replica is gone, but the data stays on disk.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
  final long blockId = block.getBlockId();
  // Drop any short-circuit file descriptors that DFSClients may be holding
  // for this replica (when ShortCircuitShm is in use).
  datanode.getShortCircuitRegistry().processBlockInvalidation(
      new ExtendedBlockId(blockId, bpid));
  // Begin uncaching the block if it is currently cached.
  cacheManager.uncacheBlock(bpid, blockId);
  // Report the replica as deleted on this storage to the NameNode.
  datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
      block.getStorageUuid());
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks. The replica's files
 * remain on disk; only client caches, the block cache, and the NameNode's
 * view of the replica are updated.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 // Report the replica as deleted so the NameNode stops counting it on
 // this storage.
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

@Override
 public void run() {
  // Delete (or move to trash) the replica's block and meta files, adjust
  // the volume's DFS-used accounting, and notify the NameNode when
  // appropriate.
  // Sum the on-disk sizes before deletion so usage can be decremented.
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;
  // When a trash directory is configured, files are moved, not deleted.
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
   LOG.warn("Unexpected error trying to "
     + (trashDirectory == null ? "delete" : "move")
     + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
     + " at file " + blockFile + ". Ignored.");
  } else {
   // A block whose numBytes equals BlockCommand.NO_ACK must not have its
   // deletion acknowledged to the NameNode.
   if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
    datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
   }
   volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
   LOG.info("Deleted " + block.getBlockPoolId() + " "
     + block.getLocalBlock() + " file " + blockFile);
  }
  // Record the block id regardless of outcome, then release the reference
  // that pins the volume while this task runs.
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Verify that the DataNode sends a single incremental block report for all
 * storages: after deleting every block, exactly one additional
 * BlockReceivedAndDeleted RPC should reach the NameNode.
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testDataNodeDoesNotSplitReports()
  throws IOException, InterruptedException {
 LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
 assertThat(cluster.getDataNodes().size(), is(1));
 // Remove all blocks from the DataNode.
 for (LocatedBlock block : blocks.getLocatedBlocks()) {
  dn0.notifyNamenodeDeletedBlock(
    block.getBlock(), block.getStorageIDs()[0]);
 }
 LOG.info("Triggering report after deleting blocks");
 // Snapshot the counter so the assertion below checks the delta, not an
 // absolute value.
 long ops = getLongCounter("BlockReceivedAndDeletedOps", getMetrics(NN_METRICS));
 // Trigger a report to the NameNode and give it a few seconds.
 // NOTE(review): the fixed sleep makes this test timing-sensitive; polling
 // the metric would be more robust — confirm before changing.
 DataNodeTestUtils.triggerBlockReport(dn0);
 Thread.sleep(5000);
 // Ensure that NameNodeRpcServer.blockReceivedAndDeleted is invoked
 // exactly once after we triggered the report.
 assertCounter("BlockReceivedAndDeletedOps", ops+1, getMetrics(NN_METRICS));
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
DataNodeTestUtils.triggerDeletionReport(dn);
assertTrue(dnd.isRegistered());
dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
DataNodeTestUtils.triggerDeletionReport(dn);
assertFalse(dnd.isRegistered());

相关文章

DataNode类方法