Usage and code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.getShortCircuitRegistry() method


This article collects code examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getShortCircuitRegistry(), showing how DataNode.getShortCircuitRegistry() is used in practice. The examples were extracted from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should be useful as references. Details of the DataNode.getShortCircuitRegistry() method are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode
Class name: DataNode
Method name: getShortCircuitRegistry

About DataNode.getShortCircuitRegistry

Returns the DataNode's ShortCircuitRegistry, which manages the shared-memory segments and slots that local DFSClients use for short-circuit reads. In the examples below it is used to check whether clients still have a replica anchored (mlocked), to record mlock events, and to invalidate replicas held in client short-circuit file-descriptor caches.
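
Before the excerpts, here is a minimal, self-contained sketch of the typical call pattern: obtain the registry from the DataNode and invalidate a block in client short-circuit caches, as the invalidate()/removeOldReplica() excerpts below do. The helper class and method names are hypothetical and only illustrative.

import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;

// Illustrative helper; the class and method names are hypothetical.
public class ShortCircuitRegistryExample {
 /**
  * Evict a replica from client short-circuit caches, mirroring the
  * invalidate()/removeOldReplica() excerpts below. The DataNode instance
  * is assumed to be supplied by the surrounding server-side code.
  */
 static void evictFromShortCircuitCaches(DataNode datanode, long blockId,
   String bpid) {
  ShortCircuitRegistry registry = datanode.getShortCircuitRegistry();
  // Tell clients holding short-circuit file descriptors (via
  // ShortCircuitShm) that this replica is no longer valid.
  registry.processBlockInvalidation(new ExtendedBlockId(blockId, bpid));
 }
}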

Code examples

Code example origin: org.apache.hadoop/hadoop-hdfs

private boolean shouldDefer() {
 /* If revocationTimeMs == 0, this is an immediate uncache request.
  * No clients were anchored at the time we made the request. */
 if (revocationTimeMs == 0) {
  return false;
 }
 /* Let's check if any clients still have this block anchored. */
 boolean anchored =
  !dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key);
 if (!anchored) {
  LOG.debug("Uncaching {} now that it is no longer in use " +
    "by any clients.", key);
  return false;
 }
 long delta = revocationTimeMs - Time.monotonicNow();
 if (delta < 0) {
  LOG.warn("Forcibly uncaching {} after {} " +
    "because client(s) {} refused to stop using it.", key,
    DurationFormatUtils.formatDurationHMS(revocationTimeMs),
    dataset.datanode.getShortCircuitRegistry().getClientNames(key));
  return false;
 }
 LOG.info("Replica {} still can't be uncached because some " +
   "clients continue to use it.  Will wait for {}", key,
   DurationFormatUtils.formatDurationHMS(delta));
 return true;
}

Code example origin: org.apache.hadoop/hadoop-hdfs

boolean deferred = false;
// Defer uncaching if clients still have the block anchored via short-circuit shared memory.
if (!dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key)) {
 deferred = true;
}
// ... (rest of the method elided in the original excerpt)

Code example origin: org.apache.hadoop/hadoop-hdfs

// Record that the block's pages have been mlocked, then update cache metrics.
dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
numBlocksCached.addAndGet(1);
dataset.datanode.getMetrics().incrBlocksCached(1);

Code example origin: org.apache.hadoop/hadoop-hdfs

// Evict the invalidated replica from client short-circuit file-descriptor caches.
datanode.getShortCircuitRegistry().processBlockInvalidation(
     new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Cleanup the old replica and notifies the NN about new replica.
 *
 * @param replicaInfo    - Old replica to be deleted
 * @param newReplicaInfo - New replica object
 * @param bpid           - block pool id
 */
private void removeOldReplica(ReplicaInfo replicaInfo,
  ReplicaInfo newReplicaInfo, final String bpid) {
 // Before deleting the files from old storage we must notify the
 // NN that the files are on the new storage. Else a blockReport from
 // the transient storage might cause the NN to think the blocks are lost.
 // Replicas must be evicted from client short-circuit caches, because the
 // storage will no longer be same, and thus will require validating
 // checksum.  This also stops a client from holding file descriptors,
 // which would prevent the OS from reclaiming the memory.
 ExtendedBlock extendedBlock =
   new ExtendedBlock(bpid, newReplicaInfo);
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   ExtendedBlockId.fromExtendedBlock(extendedBlock));
 datanode.notifyNamenodeReceivedBlock(
   extendedBlock, null, newReplicaInfo.getStorageUuid(),
   newReplicaInfo.isOnTransientStorage());
 // Remove the old replicas
 cleanupReplica(bpid, replicaInfo);
 // If deletion failed then the directory scanner will cleanup the blocks
 // eventually.
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

private boolean shouldDefer() {
 /* If revocationTimeMs == 0, this is an immediate uncache request.
  * No clients were anchored at the time we made the request. */
 if (revocationTimeMs == 0) {
  return false;
 }
 /* Let's check if any clients still have this block anchored. */
 boolean anchored =
  !dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key);
 if (!anchored) {
  LOG.debug("Uncaching {} now that it is no longer in use " +
    "by any clients.", key);
  return false;
 }
 long delta = revocationTimeMs - Time.monotonicNow();
 if (delta < 0) {
  LOG.warn("Forcibly uncaching {} after {} " +
    "because client(s) {} refused to stop using it.", key,
    DurationFormatUtils.formatDurationHMS(revocationTimeMs),
    dataset.datanode.getShortCircuitRegistry().getClientNames(key));
  return false;
 }
 LOG.info("Replica {} still can't be uncached because some " +
   "clients continue to use it.  Will wait for {}", key,
   DurationFormatUtils.formatDurationHMS(delta));
 return true;
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

boolean deferred = false;
// Defer uncaching if clients still have the block anchored via short-circuit shared memory.
if (!dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key)) {
 deferred = true;
}
// ... (rest of the method elided in the original excerpt)

Code example origin: ch.cern.hadoop/hadoop-hdfs

// Record that the block's pages have been mlocked, then update cache metrics.
dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
numBlocksCached.addAndGet(1);
dataset.datanode.getMetrics().incrBlocksCached(1);

Code example origin: ch.cern.hadoop/hadoop-hdfs

private void removeOldReplica(ReplicaInfo replicaInfo,
  ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
  long blockFileUsed, long metaFileUsed, final String bpid) {
 // Before deleting the files from old storage we must notify the
 // NN that the files are on the new storage. Else a blockReport from
 // the transient storage might cause the NN to think the blocks are lost.
 // Replicas must be evicted from client short-circuit caches, because the
 // storage will no longer be same, and thus will require validating
 // checksum.  This also stops a client from holding file descriptors,
 // which would prevent the OS from reclaiming the memory.
 ExtendedBlock extendedBlock =
   new ExtendedBlock(bpid, newReplicaInfo);
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   ExtendedBlockId.fromExtendedBlock(extendedBlock));
 datanode.notifyNamenodeReceivedBlock(
   extendedBlock, null, newReplicaInfo.getStorageUuid(),
   newReplicaInfo.isOnTransientStorage());
 // Remove the old replicas
 if (blockFile.delete() || !blockFile.exists()) {
  ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, blockFileUsed);
  if (metaFile.delete() || !metaFile.exists()) {
   ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, metaFileUsed);
  }
 }
 // If deletion failed then the directory scanner will cleanup the blocks
 // eventually.
}

Code example origin: io.prestosql.hadoop/hadoop-apache

private boolean shouldDefer() {
 /* If revocationTimeMs == 0, this is an immediate uncache request.
  * No clients were anchored at the time we made the request. */
 if (revocationTimeMs == 0) {
  return false;
 }
 /* Let's check if any clients still have this block anchored. */
 boolean anchored =
  !dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key);
 if (!anchored) {
  LOG.debug("Uncaching {} now that it is no longer in use " +
    "by any clients.", key);
  return false;
 }
 long delta = revocationTimeMs - Time.monotonicNow();
 if (delta < 0) {
  LOG.warn("Forcibly uncaching {} after {} " +
    "because client(s) {} refused to stop using it.", key,
    DurationFormatUtils.formatDurationHMS(revocationTimeMs),
    dataset.datanode.getShortCircuitRegistry().getClientNames(key));
  return false;
 }
 LOG.info("Replica {} still can't be uncached because some " +
   "clients continue to use it.  Will wait for {}", key,
   DurationFormatUtils.formatDurationHMS(delta));
 return true;
}

Code example origin: io.prestosql.hadoop/hadoop-apache

private void removeOldReplica(ReplicaInfo replicaInfo,
  ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
  long blockFileUsed, long metaFileUsed, final String bpid) {
 // Before deleting the files from old storage we must notify the
 // NN that the files are on the new storage. Else a blockReport from
 // the transient storage might cause the NN to think the blocks are lost.
 // Replicas must be evicted from client short-circuit caches, because the
 // storage will no longer be same, and thus will require validating
 // checksum.  This also stops a client from holding file descriptors,
 // which would prevent the OS from reclaiming the memory.
 ExtendedBlock extendedBlock =
   new ExtendedBlock(bpid, newReplicaInfo);
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   ExtendedBlockId.fromExtendedBlock(extendedBlock));
 datanode.notifyNamenodeReceivedBlock(
   extendedBlock, null, newReplicaInfo.getStorageUuid(),
   newReplicaInfo.isOnTransientStorage());
 // Remove the old replicas
 if (blockFile.delete() || !blockFile.exists()) {
  ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, blockFileUsed);
  if (metaFile.delete() || !metaFile.exists()) {
   ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, metaFileUsed);
  }
 }
 // If deletion failed then the directory scanner will cleanup the blocks
 // eventually.
}

Code example origin: io.prestosql.hadoop/hadoop-apache

boolean deferred = false;
// Defer uncaching if clients still have the block anchored via short-circuit shared memory.
if (!dataset.datanode.getShortCircuitRegistry().
    processBlockMunlockRequest(key)) {
 deferred = true;
}
// ... (rest of the method elided in the original excerpt)

Code example origin: ch.cern.hadoop/hadoop-hdfs

// Evict the invalidated replica from client short-circuit file-descriptor caches.
datanode.getShortCircuitRegistry().processBlockInvalidation(
     new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));

Code example origin: ch.cern.hadoop/hadoop-hdfs

@Before
public void setUp() throws IOException {
 datanode = mock(DataNode.class);
 storage = mock(DataStorage.class);
 this.conf = new Configuration();
 this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
 final DNConf dnConf = new DNConf(conf);
 when(datanode.getConf()).thenReturn(conf);
 when(datanode.getDnConf()).thenReturn(dnConf);
 final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
 when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
 // Back the mocked DataNode with a real ShortCircuitRegistry so that
 // dataset code calling getShortCircuitRegistry() works in the test.
 final ShortCircuitRegistry shortCircuitRegistry =
   new ShortCircuitRegistry(conf);
 when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
 createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
 dataset = new FsDatasetImpl(datanode, storage, conf);
 for (String bpid : BLOCK_POOL_IDS) {
  dataset.addBlockPool(bpid, conf);
 }
 assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
 assertEquals(0, dataset.getNumFailedVolumes());
}

Code example origin: io.prestosql.hadoop/hadoop-apache

/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

// Truncated excerpt: the registry is passed as the final argument to two
// test helper calls whose enclosing expressions were elided by the snippet extractor.
cluster.getDataNodes().get(0).getShortCircuitRegistry());
cluster.getDataNodes().get(0).getShortCircuitRegistry());

Code example origin: ch.cern.hadoop/hadoop-hdfs

DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
ShortCircuitRegistry registry =
  cluster.getDataNodes().get(0).getShortCircuitRegistry();
// Inspect the registry's shared-memory segments and slots (the Visitor
// implementation is elided in the original excerpt).
registry.visit(new ShortCircuitRegistry.Visitor() {
 @Override

Code example origin: ch.cern.hadoop/hadoop-hdfs

// Truncated excerpt: the registry is passed to a test helper call whose
// enclosing expression was elided by the snippet extractor.
cluster.getDataNodes().get(0).getShortCircuitRegistry());
cluster.shutdown();
sockDir.close();
