Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.getAllBpOs() method, with code examples

This article collects Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.getAllBpOs() method, showing how DataNode.getAllBpOs() is used in practice. The examples were gathered from platforms such as GitHub, Stack Overflow and Maven and extracted from selected projects, so they should serve as useful references. Details of DataNode.getAllBpOs() are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: getAllBpOs

About DataNode.getAllBpOs

Returns the BPOfferService objects for every block pool (i.e. every namenode/nameservice) that this DataNode is currently serving; in the Hadoop versions shown below the method returns a BPOfferService[] array. It is used mainly by tests and internal tooling to inspect the per-namenode connections or to trigger per-block-pool actions such as heartbeats, block reports and deletion reports.
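
A minimal usage sketch, distilled from the examples below (this is not code from Hadoop itself): it walks every block pool the DataNode serves and prints its NameNode connections. The class name BpOsProbe and the method printBlockPoolActors are placeholders chosen here for illustration; the sketch assumes the Hadoop 2.x signature in which getAllBpOs() returns a BPOfferService[] and, because BPOfferService and BPServiceActor are internal classes, it would have to live in the org.apache.hadoop.hdfs.server.datanode package, like the test code quoted below.

// Minimal sketch, not part of Hadoop; assumes Hadoop 2.x and placement
// inside the org.apache.hadoop.hdfs.server.datanode package.
package org.apache.hadoop.hdfs.server.datanode;

import java.net.InetSocketAddress;

public class BpOsProbe {

  /** Prints, for each block pool this DataNode serves, the NameNodes it talks to. */
  public static void printBlockPoolActors(DataNode dn) {
    // One BPOfferService per block pool, i.e. per nameservice in a federated cluster.
    for (BPOfferService bpos : dn.getAllBpOs()) {
      // Each BPServiceActor maintains the connection to one NameNode of that nameservice.
      for (BPServiceActor actor : bpos.getBPServiceActors()) {
        InetSocketAddress nnAddr = actor.getNNSocketAddress();
        System.out.println("block pool " + bpos.getBlockPoolId()
            + " -> NameNode " + nnAddr + ", alive=" + actor.isAlive());
      }
    }
  }
}

The same traversal pattern appears throughout the snippets below, for example in isConnectedToNN() and in the static test helpers that trigger heartbeats, block reports and deletion reports.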

Code examples

Code example origin: org.apache.hadoop/hadoop-hdfs (the same snippet also appears verbatim under io.prestosql.hadoop/hadoop-apache and ch.cern.hadoop/hadoop-hdfs)

/**
 * @param addr rpc address of the namenode
 * @return true if the datanode is connected to a NameNode at the
 * given address
 */
public boolean isConnectedToNN(InetSocketAddress addr) {
 for (BPOfferService bpos : getAllBpOs()) {
  for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
   if (addr.equals(bpsa.getNNSocketAddress())) {
    return bpsa.isAlive();
   }
  }
 }
 return false;
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

public static void triggerDeletionReport(DataNode dn) throws IOException {
 for (BPOfferService bpos : dn.getAllBpOs()) {
  bpos.triggerDeletionReportForTests();
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

public static void triggerHeartbeat(DataNode dn) throws IOException {
 for (BPOfferService bpos : dn.getAllBpOs()) {
  bpos.triggerHeartbeatForTests();
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

public static void triggerBlockReport(DataNode dn) throws IOException {
 for (BPOfferService bpos : dn.getAllBpOs()) {
  bpos.triggerBlockReportForTests();
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

private void stopBPServiceThreads(int numStopThreads, DataNode dn)
  throws Exception {
 BPOfferService[] bpoList = dn.getAllBpOs();
 int expected = dn.getBpOsCount() - numStopThreads;
 int index = numStopThreads - 1;
 while (index >= 0) {
  bpoList[index--].stop();
 }
 int iterations = 3000; // Total 30 seconds MAX wait time
 while(dn.getBpOsCount() != expected && iterations > 0) {
  Thread.sleep(WAIT_TIME_IN_MILLIS);
  iterations--;
 }
 assertEquals("Mismatch in number of BPServices running", expected,
   dn.getBpOsCount());
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

dn.getAllBpOs()[0].triggerHeartbeatForTests();

Code example origin: ch.cern.hadoop/hadoop-hdfs

@Before
public void startCluster() throws IOException {
 conf = new HdfsConfiguration();
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
 singletonNn = cluster.getNameNode();
 singletonDn = cluster.getDataNodes().get(0);
 bpos = singletonDn.getAllBpOs()[0];
 actor = bpos.getBPServiceActors().get(0);
 storageUuid = singletonDn.getFSDataset().getVolumes().get(0).getStorageID();
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

BPOfferService[] bposs = dn.getAllBpOs();
LOG.info("dn bpos len (should be 2):" + bposs.length);
Assert.assertEquals("should've registered with two namenodes", 2, bposs.length);
// ...
bposs = dn.getAllBpOs();
LOG.info("dn bpos len (should be 3):" + bposs.length);
Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
// ...
bposs = dn.getAllBpOs();
LOG.info("dn bpos len (still should be 3):" + bposs.length);
Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);

Code example origin: ch.cern.hadoop/hadoop-hdfs

for (BPOfferService thisBpos : dn.getAllBpOs()) {
 if (thisBpos.getBlockPoolId().equals(bpid)) {
  bpos = thisBpos;  // remember the BPOfferService that serves this block pool
  break;
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

DataNode datanode = cluster.getDataNodes().get(0);
BPServiceActor actor =
  datanode.getAllBpOs()[0].getBPServiceActors().get(0);
String storageUuid =
  datanode.getFSDataset().getVolumes().get(0).getStorageID();

Code example origin: ch.cern.hadoop/hadoop-hdfs

assertEquals(1, dn.getAllBpOs().length);
// ...
assertEquals(2, dn.getAllBpOs().length);
// ...
assertEquals(3, dn.getAllBpOs().length);
for (BPOfferService bpos : dn.getAllBpOs()) {
 for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
  // nnAddrsFromDN is presumably a Set, so each actor should report a distinct NameNode address
  assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

int blocksProcessed = 0, savedBlocksProcessed = 0;
try {
 BPOfferService bpos[] = ctx.datanode.getAllBpOs();
 assertEquals(1, bpos.length);
 BlockIterator iter = volume.newBlockIterator(ctx.bpids[0], "test");

Code example origin: ch.cern.hadoop/hadoop-hdfs

for (BPOfferService bpos : dn.getAllBpOs()) {
  LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
    + bpos.bpRegistration.getDatanodeUuid() + "; nna=" +
// ...
BPOfferService bpos1 = dn.getAllBpOs()[0];
bpos1.triggerBlockReportForTests();
// ...
assertEquals(0, dn.getAllBpOs().length);
cluster = null;
} finally {

Code example origin: ch.cern.hadoop/hadoop-hdfs

for (BPOfferService bpos : dn.getAllBpOs()) {
 LOG.info("BP: " + bpos);
}
// ...
BPOfferService bpos1 = dn.getAllBpOs()[0];
BPOfferService bpos2 = dn.getAllBpOs()[1];

Code example origin: ch.cern.hadoop/hadoop-hdfs

nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1, dn1.getAllBpOs().length);

Code example origin: ch.cern.hadoop/hadoop-hdfs

nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1, dn1.getAllBpOs().length);
assertEquals(1, dn2.getAllBpOs().length);

Code example origin: ch.cern.hadoop/hadoop-hdfs

@Test(timeout = 20000)
public void testClusterIdMismatchAtStartupWithHA() throws Exception {
 MiniDFSNNTopology top = new MiniDFSNNTopology()
  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
   .addNN(new MiniDFSNNTopology.NNConf("nn0"))
   .addNN(new MiniDFSNNTopology.NNConf("nn1")))
  .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
   .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
   .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
 top.setFederation(true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
   .numDataNodes(0).build();
 
 try {
  cluster.startDataNodes(conf, 1, true, null, null);
  // let the initialization be complete
  Thread.sleep(10000);
  DataNode dn = cluster.getDataNodes().get(0);
  assertTrue("Datanode should be running", dn.isDatanodeUp());
  assertEquals("Only one BPOfferService should be running", 1,
    dn.getAllBpOs().length);
 } finally {
  cluster.shutdown();
 }
}

Code example origin: ch.cern.hadoop/hadoop-hdfs

assertTrue("Datanode should be running", dn.isDatanodeUp());
assertEquals("BPOfferService should be running", 1,
  dn.getAllBpOs().length);
DataNodeProperties dnProp = cluster.stopDataNode(0);
