Usage of org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeId() with code examples


This article collects Java code examples for org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeId(), showing how the method is used in practice. The examples were extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of DataNode.getDatanodeId() are as follows:

Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getDatanodeId

About DataNode.getDatanodeId

Returns the DatanodeID of this DataNode: the identifier by which the NameNode and clients address the node, carrying its datanode UUID, IP address, hostname, and the ports used for data transfer, IPC, and HTTP.
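
Before the excerpts, a minimal sketch of a typical call, assuming a JUnit test with a running MiniDFSCluster in a variable named cluster (the variable names are illustrative):

DataNode dn = cluster.getDataNodes().get(0);
DatanodeID id = dn.getDatanodeId();
String uuid = id.getDatanodeUuid();   // stable unique identity of the node
String host = id.getHostName();       // hostname registered with the NameNode
String xferAddr = id.getXferAddr();   // "host:port" used for block transfers
int ipcPort = id.getIpcPort();        // port of the datanode's IPC server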

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

// In DataXceiver: run the server-side SASL handshake for an incoming
// data-transfer connection, identifying this datanode by its DatanodeID.
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
 socketIn, datanode.getXferAddress().getPort(),
 datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in, smallBufferSize);

Code example source: org.apache.hadoop/hadoop-hdfs

// In DataXceiver.replaceBlock: if the proxy source is this datanode itself,
// move the replica locally across storage types instead of copying it over
// the network.
try {
 if (proxySource.equals(datanode.getDatanodeId())) {
  ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
    storageType, storageId);

Code example source: org.apache.hadoop/hadoop-hdfs

// Embed this datanode's ID in the outgoing message, then forward the block
// to the next target in the pipeline.
.setNodeID(datanode.getDatanodeId()).build();
new Sender(out).writeBlock(block, storageType,
  blockToken, "", new DatanodeInfo[]{target},

Code example source: ch.cern.hadoop/hadoop-hdfs

// Mark every datanode in the MiniDFSCluster as dead on the NameNode.
public void setDataNodesDead() throws IOException {
 for (DataNodeProperties dnp : dataNodes) {
  setDataNodeDead(dnp.datanode.getDatanodeId());
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

// Stop the datanode whose data-transfer address matches dnName.
public synchronized DataNodeProperties stopDataNode(String dnName) {
 int node = -1;
 for (int i = 0; i < dataNodes.size(); i++) {
  DataNode dn = dataNodes.get(i).datanode;
  LOG.info("DN name=" + dnName + " found DN=" + dn +
    " with name=" + dn.getDisplayName());
  if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
   node = i;
   break;
  }
 }
 return stopDataNode(node);
}
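
A possible pairing with getDatanodeId(), since the lookup above compares against the transfer address (a sketch; assumes the MiniDFSCluster test context used throughout these examples):

DatanodeID dnId = cluster.getDataNodes().get(0).getDatanodeId();
DataNodeProperties stopped = cluster.stopDataNode(dnId.getXferAddr());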

Code example source: ch.cern.hadoop/hadoop-hdfs

private void updateAllStorages(BlockManager bm,
                ArrayList<DataNode> datanodes) throws Exception {
 for (DataNode dd : datanodes) {
  // Resolve the NameNode-side descriptor for this datanode.
  DatanodeDescriptor descriptor =
    bm.getDatanodeManager().getDatanode(dd.getDatanodeId());
  Set<DatanodeStorageInfo> setInfos = new HashSet<DatanodeStorageInfo>();
  DatanodeStorageInfo[] infos = descriptor.getStorageInfos();
  Random random = new Random();
  for (int i = 0; i < infos.length; i++) {
   // Overwrite each storage with a randomly named DISK storage in the
   // FAILED state. (setInfos is collected but not used in this excerpt.)
   int blkId = random.nextInt(101);
   DatanodeStorage storage = new DatanodeStorage(Integer.toString(blkId),
     DatanodeStorage.State.FAILED, StorageType.DISK);
   infos[i].updateFromStorage(storage);
   setInfos.add(infos[i]);
  }
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Stop the heartbeat of a datanode in the MiniDFSCluster
 * 
 * @param cluster
 *          The MiniDFSCluster
 * @param hostName
 *          The hostName of the datanode to be stopped
 * @return The DataNode whose heartbeat has been stopped
 */
private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) {
 for (DataNode dn : cluster.getDataNodes()) {
  if (dn.getDatanodeId().getHostName().equals(hostName)) {
   DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
   return dn;
  }
 }
 return null;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

private DataNode getDataNode(DatanodeInfo decomInfo) {
 DataNode decomNode = null;
 for (DataNode dn: cluster.getDataNodes()) {
  if (decomInfo.equals(dn.getDatanodeId())) {
   decomNode = dn;
   break;
  }
 }
 assertNotNull("Could not find decomNode in cluster!", decomNode);
 return decomNode;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Restart a particular DataNode.
 * @param idn index of the DataNode
 * @param keepPort true if should restart on the same port
 * @param expireOnNN true if NameNode should expire the DataNode heartbeat
 * @return true if the DataNode was stopped and restarted
 * @throws IOException
 */
public synchronized boolean restartDataNode(
  int idn, boolean keepPort, boolean expireOnNN) throws IOException {
 DataNodeProperties dnprop = stopDataNode(idn);
 if (dnprop == null) {
  return false;
 }
 if (expireOnNN) {
  setDataNodeDead(dnprop.datanode.getDatanodeId());
 }
 return restartDataNode(dnprop, keepPort);
}
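
For example, a test could restart datanode 0 on its original port after asking the NameNode to expire its heartbeat (a sketch under the same MiniDFSCluster assumptions):

boolean restarted = cluster.restartDataNode(0, true, true);
assertTrue("expected the datanode to come back", restarted);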

Code example source: ch.cern.hadoop/hadoop-hdfs

/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
 assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
 String c5Path = testDataCluster5.getAbsolutePath();
 conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
 MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
  .numDataNodes(1)
  .checkDataNodeHostConfig(true)
  .build();
 try {
  assertEquals("DataNode hostname config not respected", "MYHOST",
    cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
 } finally {
  MiniDFSCluster.shutdownCluster(cluster5);
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

private Socket createSocket() throws IOException {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
 cluster.waitActive();
 LOG.info("MiniDFSCluster started.");
 return DFSOutputStream.createSocketForPipeline(
   new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
   1, cluster.getFileSystem().getClient());
}

Code example source: ch.cern.hadoop/hadoop-hdfs

@BeforeClass
public static void setUp() throws Exception {
 cluster = (new MiniDFSCluster.Builder(conf))
   .numDataNodes(1).build();
 nnAddress = cluster.getNameNode().getNameNodeAddress();
 DataNode dn = cluster.getDataNodes().get(0);
 dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
                  dn.getIpcPort());
}

Code example source: ch.cern.hadoop/hadoop-hdfs

// Look up the NameNode-side descriptor and its per-storage state.
DatanodeDescriptor dnDescriptor =
  bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();

Code example source: ch.cern.hadoop/hadoop-hdfs

// The peer cache should not yet hold a connection to this datanode.
Peer peer = peerCache.get(dn.getDatanodeId(), false);
assertTrue(peer == null);
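
For context, a sketch of how the cache would be populated before such a get() succeeds; this assumes a test-local PeerCache instance and an already established Peer connection:

peerCache.put(dn.getDatanodeId(), peer);                 // cache the open connection
Peer reused = peerCache.get(dn.getDatanodeId(), false);  // now non-null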

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Verify the support for decommissioning a datanode that is already dead.
 * Under this scenario the datanode should immediately be marked as
 * DECOMMISSIONED
 */
@Test(timeout=120000)
public void testDecommissionDeadDN() throws Exception {
 Logger log = Logger.getLogger(DecommissionManager.class);
 log.setLevel(Level.DEBUG);
 DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
 String dnName = dnID.getXferAddr();
 DataNodeProperties stoppedDN = cluster.stopDataNode(0);
 DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
   false, 30000);
 FSNamesystem fsn = cluster.getNamesystem();
 final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
 DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
 decommissionNode(fsn, localFileSys, dnName);
 dm.refreshNodes(conf);
 BlockManagerTestUtil.recheckDecommissionState(dm);
 assertTrue(dnDescriptor.isDecommissioned());
 // Add the node back
 cluster.restartDataNode(stoppedDN, true);
 cluster.waitActive();
 // Call refreshNodes on FSNamesystem with empty exclude file to remove the
 // datanode from decommissioning list and make it available again.
 writeConfigFile(localFileSys, excludeFile, null);
 dm.refreshNodes(conf);
}

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Check that the NameNode is not attempting to cache anything.
 */
private void checkPendingCachedEmpty(MiniDFSCluster cluster)
  throws Exception {
 cluster.getNamesystem().readLock();
 try {
  final DatanodeManager datanodeManager =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();
  for (DataNode dn : cluster.getDataNodes()) {
   DatanodeDescriptor descriptor =
     datanodeManager.getDatanode(dn.getDatanodeId());
   Assert.assertTrue("Pending cached list of " + descriptor +
       " is not empty, "
       + Arrays.toString(descriptor.getPendingCached().toArray()), 
     descriptor.getPendingCached().isEmpty());
  }
 } finally {
  cluster.getNamesystem().readUnlock();
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

// The receiver at the start of the chain is truncated in this excerpt; the
// chain resolves the storage registered under newStorageUuid for datanode dn0.
DatanodeStorageInfo storageInfo = ...
                     .getBlockManager()
                     .getDatanodeManager()
                     .getDatanode(dn0.getDatanodeId())
                     .getStorageInfo(newStorageUuid);
assertNotNull(storageInfo);

Code example source: ch.cern.hadoop/hadoop-hdfs

// Mark the datanode stale by pushing its last-update time into the past...
DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem()
  .getBlockManager().getDatanodeManager()
  .getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
// ...and later reset it to the current time to make the node fresh again.
dnDes = cluster.getNameNode().getNamesystem()
  .getBlockManager().getDatanodeManager()
  .getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, 0);

Code example source: ch.cern.hadoop/hadoop-hdfs

@Test
public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
 // start a cluster
 final Configuration conf = new HdfsConfiguration();
 MiniDFSCluster cluster = null;
 try {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  final DFSAdmin dfsadmin = new DFSAdmin(conf);
  DataNode dn = cluster.getDataNodes().get(0);
  // check the datanode
  final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
  final String[] args1 = {"-getDatanodeInfo", dnAddr};
  runCmd(dfsadmin, true, args1);
  // issue shutdown to the datanode.
  final String[] args2 = {"-shutdownDatanode", dnAddr, "upgrade" };
  runCmd(dfsadmin, true, args2);
  // the datanode should be down.
  GenericTestUtils.waitForThreadTermination(
    "Async datanode shutdown thread", 100, 10000);
  Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
  // ping should fail.
  assertEquals(-1, dfsadmin.run(args1));
 } finally {
  if (cluster != null) cluster.shutdown();
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

// Verify the NameNode's view of this datanode's failed volumes (the
// enclosing method signature is truncated in this excerpt).
throws Exception {
DatanodeDescriptor dd = cluster.getNamesystem().getBlockManager()
  .getDatanodeManager().getDatanode(dn.getDatanodeId());
assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();