org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(9.0k)|赞(0)|评价(0)|浏览(166)

本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock()方法的一些代码示例,展示了DataNode.syncBlock()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.syncBlock()方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:syncBlock

DataNode.syncBlock介绍

[英]Block synchronization
[中]块同步

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_06. Every datanode fails replica initialization with an
 * exception, so block synchronization must never be attempted.
 *
 * @throws IOException in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
 if (LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode dnSpy = spy(dn);
 // Make initReplicaRecovery() fail for every recovering block.
 doThrow(new IOException())
   .when(dnSpy).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon recoveryDaemon = dnSpy.recoverBlocks(initRecoveringBlocks());
 recoveryDaemon.join();
 // With no replica recovered, syncBlock() must never be invoked.
 verify(dnSpy, never())
   .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 // Force every replica update to fail so syncBlock() cannot complete.
 doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
   block, RECOVERY_ID, block.getNumBytes());
 try {
  spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
  fail("Sync should fail");
 } catch (IOException e) {
  // BUG FIX: the original called e.getMessage().startsWith(...) and
  // discarded the boolean, so a wrong message could never fail the test.
  // Assert the expected message explicitly.
  GenericTestUtils.assertExceptionContains("Cannot recover ", e);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_05. A datanode reports RecoveryInProgressException, so
 * block synchronization must never be attempted.
 *
 * @throws IOException in case of an error
 */
@Test
public void testRecoveryInProgressException()
 throws IOException, InterruptedException {
 if (LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode dnSpy = spy(dn);
 // Report an in-progress recovery from initReplicaRecovery().
 doThrow(new RecoveryInProgressException("Replica recovery is in progress"))
   .when(dnSpy).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon recoveryDaemon = dnSpy.recoverBlocks(initRecoveringBlocks());
 recoveryDaemon.join();
 // Recovery never progressed, so syncBlock() must not have been called.
 verify(dnSpy, never())
   .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNoReplicaUnderRecovery() throws IOException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 // Create an RBW replica only; no ReplicaUnderRecovery exists for the block.
 dn.data.createRbw(block);
 try {
  dn.syncBlock(rBlock, initBlockRecords(dn));
  fail("Sync should fail");
 } catch (IOException e) {
  // BUG FIX: the original discarded the boolean from startsWith(), so a
  // wrong message could never fail the test. Assert the message instead.
  GenericTestUtils.assertExceptionContains("Cannot recover ", e);
 }
 // The namenode must not be told the block was synchronized.
 verify(dn.namenode, never()).commitBlockSynchronization(
   any(Block.class), anyLong(), anyLong(), anyBoolean(),
   anyBoolean(), any(DatanodeID[].class));
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
  * BlockRecoveryFI_11. a replica's recovery id does not match new GS.
  *
  * @throws IOException in case of an error
  */
 @Test
 public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
   LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  BlockWriteStreams streams = null;
  try {
   streams = replicaInfo.createStreams(true, 0, 0);
   streams.checksumOut.write('a');
   // Start recovery with an id newer than RECOVERY_ID so the subsequent
   // sync sees a mismatched recovery/generation stamp.
   dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
   try {
    dn.syncBlock(rBlock, initBlockRecords(dn));
    fail("Sync should fail");
   } catch (IOException e) {
    // BUG FIX: the original discarded the boolean from startsWith(), so a
    // wrong message could never fail the test. Assert it explicitly.
    GenericTestUtils.assertExceptionContains("Cannot recover ", e);
   }
   verify(dn.namenode, never()).commitBlockSynchronization(
     any(Block.class), anyLong(), anyLong(), anyBoolean(),
     anyBoolean(), any(DatanodeID[].class));
  } finally {
   // BUG FIX: if createStreams() threw, 'streams' is still null and the
   // unconditional close() would raise an NPE that masks the real failure.
   if (streams != null) {
    streams.close();
   }
  }
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_06. Every datanode fails replica initialization with an
 * exception, so block synchronization must never be attempted.
 *
 * @throws IOException in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
 if (LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode dnSpy = spy(dn);
 // Make initReplicaRecovery() fail for every recovering block.
 doThrow(new IOException())
   .when(dnSpy).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon recoveryDaemon = dnSpy.recoverBlocks("fake NN", initRecoveringBlocks());
 recoveryDaemon.join();
 // With no replica recovered, syncBlock() must never be invoked.
 verify(dnSpy, never())
   .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode spyDN = spy(dn);
 // Force every replica update to fail so syncBlock() cannot complete.
 doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
   block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
 try {
  spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
  fail("Sync should fail");
 } catch (IOException e) {
  // BUG FIX: the original called e.getMessage().startsWith(...) and
  // discarded the boolean, so a wrong message could never fail the test.
  // Assert the expected message explicitly.
  GenericTestUtils.assertExceptionContains("Cannot recover ", e);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Drives DataNode#syncBlock over two mocked replicas and verifies it runs
 * to completion when both datanodes report a replica of the expected length.
 */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
  ReplicaRecoveryInfo replica2,
  InterDatanodeProtocol dn1,
  InterDatanodeProtocol dn2,
  long expectLen) throws IOException {

 DatanodeInfo[] targets = new DatanodeInfo[] {
   mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
 RecoveringBlock recoveringBlock =
   new RecoveringBlock(block, targets, RECOVERY_ID);

 // Assemble the two-element recovery list.
 ArrayList<BlockRecord> records = new ArrayList<BlockRecord>(2);
 records.add(new BlockRecord(
   new DatanodeID("xx", "yy", 44, 55), dn1, replica1));
 records.add(new BlockRecord(
   new DatanodeID("aa", "bb", 11, 22), dn2, replica2));

 // Each datanode reports a successfully updated replica of length expectLen.
 when(dn1.updateReplicaUnderRecovery((Block)anyObject(), anyLong(),
   anyLong())).thenReturn(
     new Block(block.getBlockId(), expectLen, block.getGenerationStamp()));
 when(dn2.updateReplicaUnderRecovery((Block)anyObject(), anyLong(),
   anyLong())).thenReturn(
     new Block(block.getBlockId(), expectLen, block.getGenerationStamp()));

 dn.syncBlock(recoveringBlock, records);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

syncBlock(rBlock, syncList);

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_05. A datanode reports RecoveryInProgressException, so
 * block synchronization must never be attempted.
 *
 * @throws IOException in case of an error
 */
@Test
public void testRecoveryInProgressException()
 throws IOException, InterruptedException {
 if (LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 DataNode dnSpy = spy(dn);
 // Report an in-progress recovery from initReplicaRecovery().
 doThrow(new RecoveryInProgressException("Replica recovery is in progress"))
   .when(dnSpy).initReplicaRecovery(any(RecoveringBlock.class));
 Daemon recoveryDaemon = dnSpy.recoverBlocks("fake NN", initRecoveringBlocks());
 recoveryDaemon.join();
 // Recovery never progressed, so syncBlock() must not have been called.
 verify(dnSpy, never())
   .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}

代码示例来源:origin: io.fabric8/fabric-hadoop

block.setNumBytes(minlength);
 return syncBlock(block, syncList, targets, closeFile);
} finally {
 synchronized (ongoingRecovery) {

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

block.setNumBytes(minlength);
 return syncBlock(block, syncList, closeFile);
} finally {
 synchronized (ongoingRecovery) {

代码示例来源:origin: com.facebook.hadoop/hadoop-core

block.setNumBytes(minlength);
 return syncBlock(namespaceId, block, syncList, closeFile,
   datanodeProxies, deadline);
} finally {

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNoReplicaUnderRecovery() throws IOException {
 if(LOG.isDebugEnabled()) {
  LOG.debug("Running " + GenericTestUtils.getMethodName());
 }
 dn.data.createRbw(StorageType.DEFAULT, block, false);
 try {
  dn.syncBlock(rBlock, initBlockRecords(dn));
  fail("Sync should fail");
 } catch (IOException e) {
  e.getMessage().startsWith("Cannot recover ");
 }
 DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
 verify(namenode, never()).commitBlockSynchronization(
   any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
   anyBoolean(), any(DatanodeID[].class), any(String[].class));
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

syncBlock(rBlock, syncList);

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
  ReplicaRecoveryInfo replica2,
  InterDatanodeProtocol dn1,
  InterDatanodeProtocol dn2,
  long expectLen) throws IOException {
 
 DatanodeInfo[] locs = new DatanodeInfo[]{
   mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
 RecoveringBlock rBlock = new RecoveringBlock(block, 
   locs, RECOVERY_ID);
 ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
 BlockRecord record1 = new BlockRecord(
   DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
 BlockRecord record2 = new BlockRecord(
   DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
 syncList.add(record1);
 syncList.add(record2);
 
 when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
   anyLong(), anyLong())).thenReturn("storage1");
 when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
   anyLong(), anyLong())).thenReturn("storage2");
 dn.syncBlock(rBlock, syncList);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
try {
 dn.syncBlock(rBlock, initBlockRecords(dn));
 fail("Sync should fail");
} catch (IOException e) {

相关文章

DataNode类方法