Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlocks() method, with code examples


This article collects Java code examples of org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlocks() and shows how the method is used in practice. The examples are taken from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, and are intended as a practical reference. Details of DataNode.recoverBlocks() are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: recoverBlocks

About DataNode.recoverBlocks

The original listing provides no description. Judging from the examples below, recoverBlocks() starts block recovery asynchronously: it spawns a Daemon thread that performs recovery of the given RecoveringBlocks and returns that Daemon, so callers can join() it to wait for the recovery to finish.
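
To give a rough orientation before the collected examples, here is a minimal sketch of driving recoverBlocks() directly. It assumes the Hadoop 2.x two-argument overload seen in several examples below (older releases expose recoverBlocks(Collection<RecoveringBlock>) without the caller string, and the Facebook fork adds a namespace id). The class and method names RecoverBlocksSketch and recoverAndWait are made up for illustration; the DataNode instance and the RecoveringBlock list would normally come from a running cluster and a NameNode recovery command.

import java.util.Collection;

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.util.Daemon;

public class RecoverBlocksSketch {
 /** Kick off block recovery on a DataNode and wait for it to finish. */
 public static void recoverAndWait(DataNode dn,
   Collection<RecoveringBlock> blocks) throws InterruptedException {
  // The first argument only identifies who requested the recovery and is
  // used for logging ("fake NN" and "NameNode at ..." in the examples below).
  Daemon d = dn.recoverBlocks("NameNode (recovery requester)", blocks);
  // recoverBlocks() returns immediately; recovery runs in the daemon thread.
  d.join();
 }
}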

Code examples

Code example origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_06. all datanodes throw an exception.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doThrow(new IOException()).
   when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
 d.join();
 verify(spyDN, never()).syncBlock(
   any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

Code example origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
 throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
   when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
 d.join();
 verify(spyDN, never()).syncBlock(
   any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

Code example origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
   block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
   initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
 d.join();
 verify(dn.namenode).commitBlockSynchronization(
   block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY);
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_06. all datanodes throw an exception.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doThrow(new IOException()).
   when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
 d.join();
 verify(spyDN, never()).syncBlock(
   any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
 throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
   when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
 d.join();
 verify(spyDN, never()).syncBlock(
   any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
   block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
   initReplicaRecovery(any(RecoveringBlock.class));
 Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
 d.join();
 DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
 verify(dnP).commitBlockSynchronization(
   block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}

Code example origin: com.facebook.hadoop/hadoop-core (dispatch of the NameNode's DNA_RECOVERBLOCK command)

break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
 recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets());
 break;
default:

Code example origin: org.jvnet.hudson.hadoop/hadoop-core (dispatch of the NameNode's DNA_RECOVERBLOCK command)

break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
 recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
 break;
default:

Code example origin: io.fabric8/fabric-hadoop (dispatch of the NameNode's DNA_RECOVERBLOCK command)

break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
 recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
 break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:

Code example origin: ch.cern.hadoop/hadoop-hdfs (dispatch of the NameNode's DNA_RECOVERBLOCK command)

case DatanodeProtocol.DNA_RECOVERBLOCK:
 String who = "NameNode at " + actor.getNNSocketAddress();
 dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
 break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:

Code example origin: io.prestosql.hadoop/hadoop-apache (dispatch of the NameNode's DNA_RECOVERBLOCK command)

case DatanodeProtocol.DNA_RECOVERBLOCK:
 String who = "NameNode at " + actor.getNNSocketAddress();
 dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
 break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
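
The tests above rely on an initRecoveringBlocks() helper from the surrounding test class. The following sketch shows roughly what such a helper has to produce: a list of RecoveringBlock entries, each pairing an ExtendedBlock with its replica locations and the new generation stamp (recovery id) issued by the NameNode. This is an illustrative sketch rather than the actual helper; the class name RecoveringBlocksSketch, the pool id, block identifiers, and recovery id are placeholder values, and the location array is left empty here, whereas a real BlockRecoveryCommand carries the DatanodeInfo of every replica.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

public class RecoveringBlocksSketch {
 /** Build a single-entry list of blocks to recover (placeholder values). */
 public static List<RecoveringBlock> buildRecoveringBlocks() {
  // Block to recover: block pool id, block id, length, current generation stamp.
  ExtendedBlock block = new ExtendedBlock("fake-pool", 1L, 1024L, 1000L);
  // Replica locations are left empty in this sketch.
  DatanodeInfo[] locations = new DatanodeInfo[0];
  long recoveryId = 2000L;  // new generation stamp assigned for this recovery
  List<RecoveringBlock> blocks = new ArrayList<>();
  blocks.add(new RecoveringBlock(block, locations, recoveryId));
  return blocks;
 }
}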
