Usage and code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeUuid() method


This article collects code examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeUuid() and shows how it is used in practice. The examples are drawn from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the DataNode.getDatanodeUuid() method:

Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getDatanodeUuid

About DataNode.getDatanodeUuid

Returns the UUID string that identifies this DataNode within the cluster. The UUID is assigned when the DataNode registers with a NameNode, so the method may return null or an empty string on a node that has not yet registered (the first example below substitutes "unassigned" in that case).
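
As a quick, hypothetical illustration (assuming the hadoop-hdfs test jar is on the classpath, since MiniDFSCluster is a test-only utility), the following sketch prints the UUID of every DataNode in an in-process mini cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DatanodeUuidExample {
 public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  // Start a small in-process HDFS cluster (test-only helper).
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(2).build();
  try {
   // Wait until the DataNodes have registered with the NameNode,
   // at which point their UUIDs are assigned.
   cluster.waitActive();
   for (DataNode dn : cluster.getDataNodes()) {
    System.out.println("DataNode UUID: " + dn.getDatanodeUuid());
   }
  } finally {
   cluster.shutdown();
  }
 }
}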

Code examples

Example source: org.apache.hadoop/hadoop-hdfs (the same toString() implementation also appears verbatim in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache; the duplicate listings are omitted below)

@Override
public String toString() {
 readLock();
 try {
  if (bpNSInfo == null) {
   // If we haven't yet connected to our NN, we don't yet know our
   // own block pool ID.
   // If _none_ of the block pools have connected yet, we don't even
   // know the DatanodeID ID of this DN.
   String datanodeUuid = dn.getDatanodeUuid();
   if (datanodeUuid == null || datanodeUuid.isEmpty()) {
    datanodeUuid = "unassigned";
   }
   return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
  } else {
   return "Block pool " + getBlockPoolId() +
     " (Datanode Uuid " + dn.getDatanodeUuid() +
     ")";
  }
 } finally {
  readUnlock();
 }
}

Example source: org.apache.hadoop/hadoop-hdfs

/**
 * Initializes {@link DiskBalancer}.
 * @param data - FSDataSet
 * @param conf - Config
 */
private void initDiskBalancer(FsDatasetSpi data,
                              Configuration conf) {
 if (this.diskBalancer != null) {
  return;
 }
 DiskBalancer.BlockMover mover = new DiskBalancer.DiskBalancerMover(data,
   conf);
 this.diskBalancer = new DiskBalancer(getDatanodeUuid(), conf, mover);
}

Example source: org.apache.hadoop/hadoop-hdfs

This excerpt was truncated at both ends; the enclosing if/else from the short-circuit shared-memory trace logging is restored below for context (a reconstruction, so the exact format strings may differ between Hadoop versions):

if (success) {
 BlockSender.ClientTraceLog.info(String.format(
   "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
   "op: REQUEST_SHORT_CIRCUIT_SHM," +
   " shmId: %016x%016x, srvID: %s, success: true",
   clientName, shmInfo.getShmId().getHi(),
   shmInfo.getShmId().getLo(),
   datanode.getDatanodeUuid()));
} else {
 BlockSender.ClientTraceLog.info(String.format(
   "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
   "op: REQUEST_SHORT_CIRCUIT_SHM, " +
   "shmId: n/a, srvID: %s, success: false",
   clientName, datanode.getDatanodeUuid()));
}

Example source: org.apache.hadoop/hadoop-hdfs (the same fragment also appears in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache; the duplicates are omitted below)

Another truncated excerpt; the enclosing ClientTraceLog call for releasing a short-circuit slot is restored for context (a reconstruction; the leading format-string segment may vary by version):

BlockSender.ClientTraceLog.info(String.format(
  "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
  " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
  slotId.getShmId().getHi(), slotId.getShmId().getLo(),
  slotId.getSlotIdx(), datanode.getDatanodeUuid(), success));

Example source: org.apache.hadoop/hadoop-hdfs

An excerpt from a switch over the storage-directory state; the NOT_FORMATTED case label is restored for context:

case NOT_FORMATTED: // format
 LOG.info("Storage directory with location {} is not formatted for "
    + "namespace {}. Formatting...", location, nsInfo.getNamespaceID());
 format(sd, nsInfo, datanode.getDatanodeUuid(), datanode.getConf());
 break;
default:  // recovery part is common

Example source: org.apache.hadoop/hadoop-hdfs

// Registers the dataset MBean under this DataNode's UUID.
registerMBean(datanode.getDatanodeUuid());

Example source: ch.cern.hadoop/hadoop-hdfs

static DataNode findDatanode(String id, List<DataNode> datanodes) {
 for (DataNode d : datanodes) {
  if (d.getDatanodeUuid().equals(id)) {
   return d;
  }
 }
 throw new IllegalStateException("Datanode " + id + " not in datanode list: "
   + datanodes);
}

Example source: ch.cern.hadoop/hadoop-hdfs (the same fragment also appears in io.prestosql.hadoop/hadoop-apache; the duplicate is omitted below)

A truncated excerpt from an older variant of the storage-format check. The first argument of the LOG.info call was lost in extraction; the case label is restored for context:

case NOT_FORMATTED: // format
   + " is not formatted for namespace " + nsInfo.getNamespaceID()
    + ". Formatting...");
 format(sd, nsInfo, datanode.getDatanodeUuid());
 break;
default:  // recovery part is common

Example source: ch.cern.hadoop/hadoop-hdfs

private void doDecomCheck(DatanodeManager datanodeManager,
  DecommissionManager decomManager, int expectedNumCheckedNodes)
  throws IOException, ExecutionException, InterruptedException {
 // Decom all nodes
 ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
 for (DataNode d: cluster.getDataNodes()) {
  DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
    decommissionedNodes,
    AdminStates.DECOMMISSION_INPROGRESS);
  decommissionedNodes.add(dn);
 }
 // Run decom scan and check
 BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
 assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, 
   decomManager.getNumNodesChecked());
 // Recommission all nodes
 for (DatanodeInfo dn : decommissionedNodes) {
  recommissionNode(0, dn);
 }
}

Example source: ch.cern.hadoop/hadoop-hdfs

raFile.writeInt(0);
raFile.close();
String datanodeId0 = dn0.getDatanodeUuid();
LocatedBlock lb = DFSTestUtil.getAllBlocks(fs, filePath).get(0);
String storageId = lb.getStorageIDs()[0];

Example source: ch.cern.hadoop/hadoop-hdfs

A test excerpt that polls until a storage has been pruned from the DataNode's descriptor; the opening of the enclosing GenericTestUtils.waitFor call was truncated and is restored here as a plausible reconstruction:

GenericTestUtils.waitFor(new Supplier<Boolean>() {
 @Override
 public Boolean get() {
  final DatanodeDescriptor dnDescriptor =
    cluster.getNamesystem().getBlockManager().getDatanodeManager().
      getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid());
  assertNotNull(dnDescriptor);
  DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos();
  for (DatanodeStorageInfo info : infos) {
   if (info.getStorageID().equals(storageIdToRemove)) {
    LOG.info("Still found storage " + storageIdToRemove + " on " +
      info + ".");
    return false;
   }
  }
  assertEquals(NUM_STORAGES_PER_DN - 1, infos.length);
  return true;
 }
}, 1000, 30000);

Example source: ch.cern.hadoop/hadoop-hdfs

The extraction duplicated the decommissionNode call; deduplicated, the loop reads:

for (int i = 0; i < 2; i++) {
 final DataNode d = cluster.getDataNodes().get(i);
 DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
   decommissionedNodes,
   AdminStates.DECOMMISSION_INPROGRESS);

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

+ " is not formatted for namespace " + nsInfo.getNamespaceID()
   + ". Formatting...");
 format(sd, nsInfo, datanode.getDatanodeUuid());
 break;
default:  // recovery part is common

Example source: ch.cern.hadoop/hadoop-hdfs

String dId = cluster.getDataNodes().get(0).getDatanodeUuid();
DatanodeDescriptor dnd = BlockManagerTestUtil.getDatanode(ns, dId);
DatanodeStorageInfo[] storages = dnd.getStorageInfos();

Example source: ch.cern.hadoop/hadoop-hdfs

LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
last.shutdown();

Example source: ch.cern.hadoop/hadoop-hdfs

BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
  getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
DatanodeStorageInfo[] dnStoragesInfosBeforeRestart =
  dnDescriptor.getStorageInfos();
