本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.getDisplayName()
方法的一些代码示例,展示了DataNode.getDisplayName()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.getDisplayName()
方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:getDisplayName
暂无
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
@Override
public String toString() {
  // Summarize key runtime identity of this DataNode for logs/debugging.
  final StringBuilder sb = new StringBuilder("DataNode{");
  sb.append("data=").append(data)
      .append(", localName='").append(getDisplayName())
      .append("', datanodeUuid='").append(storage.getDatanodeUuid())
      .append("', xmitsInProgress=").append(xmitsInProgress.get())
      .append('}');
  return sb.toString();
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Forcibly closes the peer server to terminate the xceiver accept loop.
 * May only be called after the datanode has been told to stop
 * ({@code shouldRun == false}) or is shutting down for an upgrade.
 */
void kill() {
  // Fixed typo in the assertion message ("shoudRun" -> "shouldRun").
  assert (datanode.shouldRun == false || datanode.shutdownForUpgrade) :
    "shouldRun should be set to false or restarting should be true"
    + " before killing";
  try {
    this.peerServer.close();
    this.closed = true;
  } catch (IOException ie) {
    // Best effort: log and continue; the server is going down anyway.
    LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
  }
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
} catch (OutOfMemoryError ie) {
IOUtils.cleanup(null, peer);
LOG.error(datanode.getDisplayName()
+ ":DataXceiverServer: Exiting due to: ", te);
datanode.shouldRun = false;
closed = true;
} catch (IOException ie) {
LOG.warn(datanode.getDisplayName()
+ " :DataXceiverServer: close exception", ie);
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
(!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
} catch (Throwable t) {
String s = datanode.getDisplayName() + ":DataXceiver error processing "
+ ((op == null) ? "unknown" : op.name()) + " operation "
+ " src: " + remoteAddress + " dst: " + localAddress;
collectThreadLocalStates();
LOG.debug("{}:Number of active connections is: {}",
datanode.getDisplayName(), datanode.getXceiverCount());
updateCurrentThreadName("Cleaning up");
if (peer != null) {
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
@Override // FsDatasetSpi
// Recovers a replica in the RBW (Replica Being Written) state, e.g. after a
// client/pipeline failure, so that writing can resume with generation stamp
// newGS. Throws ReplicaNotFoundException if the replica is not in RBW state.
public ReplicaHandler recoverRbw(
ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
LOG.info("Recover RBW replica " + b);
// Retry loop: if another thread is still registered as the writer, stop it
// and try again until this thread successfully takes over as the writer.
while (true) {
try {
// All replica-map access happens under the dataset lock.
try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo =
getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
// check the replica's state
if (replicaInfo.getState() != ReplicaState.RBW) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
}
ReplicaInPipeline rbw = (ReplicaInPipeline)replicaInfo;
// Atomically claim writer ownership; fails if another thread still owns
// the replica, in which case we must stop that writer first.
if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
throw new MustStopExistingWriter(rbw);
}
LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
}
} catch (MustStopExistingWriter e) {
// Stop the current writer (bounded by the xceiver stop timeout), then
// loop back and attempt the takeover again.
e.getReplicaInPipeline().stopWriter(
datanode.getDnConf().getXceiverStopTimeout());
}
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
getClass().getSimpleName(), DataNode.this.getDisplayName(),
b, b.getNumBytes(), curTarget);
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
initIpcServer();
metrics = DataNodeMetrics.create(getConf(), getDisplayName());
peerMetrics = dnConf.peerStatsEnabled ?
DataNodePeerMetrics.create(getDisplayName()) : null;
metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
+ ", maxBytesRcvd=" + maxBytesRcvd + "\n clientname=" + clientname
+ ", srcDataNode=" + srcDataNode
+ ", datanode=" + datanode.getDisplayName()
+ "\n requestedChecksum=" + requestedChecksum
+ "\n cachingStrategy=" + cachingStrategy
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
@Override
public String toString() {
  // Render this DataNode's identifying state as a single debug string.
  final StringBuilder text = new StringBuilder();
  text.append("DataNode{data=").append(data);
  text.append(", localName='").append(getDisplayName()).append('\'');
  text.append(", datanodeUuid='").append(storage.getDatanodeUuid()).append('\'');
  text.append(", xmitsInProgress=").append(xmitsInProgress.get());
  text.append('}');
  return text.toString();
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
@Override
public String toString() {
  // Same output as concatenation, assembled piecewise for readability.
  String head = "DataNode{data=" + data;
  String local = ", localName='" + getDisplayName() + "'";
  String uuid = ", datanodeUuid='" + storage.getDatanodeUuid() + "'";
  String xmits = ", xmitsInProgress=" + xmitsInProgress.get();
  return head + local + uuid + xmits + "}";
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Closes the peer server so the accept loop terminates.
 * Legal only once the datanode is stopping ({@code shouldRun == false})
 * or restarting for an upgrade.
 */
void kill() {
  // Fixed typo in the assertion message ("shoudRun" -> "shouldRun").
  assert (datanode.shouldRun == false || datanode.shutdownForUpgrade) :
    "shouldRun should be set to false or restarting should be true"
    + " before killing";
  try {
    this.peerServer.close();
    this.closed = true;
  } catch (IOException ie) {
    // Shutdown path: log the failure rather than propagating it.
    LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
  }
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
/**
 * Shuts down the xceiver server by closing its peer server.
 * Callers must have already initiated datanode shutdown
 * ({@code shouldRun == false}) or an upgrade restart.
 */
void kill() {
  // Fixed typo in the assertion message ("shoudRun" -> "shouldRun").
  assert (datanode.shouldRun == false || datanode.shutdownForUpgrade) :
    "shouldRun should be set to false or restarting should be true"
    + " before killing";
  try {
    this.peerServer.close();
    this.closed = true;
  } catch (IOException ie) {
    // Already tearing down; record the close failure and proceed.
    LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
  }
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Removes and shuts down the datanode at index {@code i}.
 *
 * @param i index into the cluster's datanode list
 * @return the removed datanode's properties, or null if i is out of range
 */
public synchronized DataNodeProperties stopDataNode(int i) {
  // Guard: nothing to stop for an out-of-range index.
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  final DataNodeProperties removed = dataNodes.remove(i);
  final DataNode dn = removed.datanode;
  // size() + 1 reflects the count before the removal above.
  final int originalCount = dataNodes.size() + 1;
  LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName()
      + " from a total of " + originalCount + " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return removed;
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Removes the datanode at index {@code i} and shuts it down in upgrade mode.
 *
 * @param i index into the cluster's datanode list
 * @return the removed datanode's properties, or null if i is out of range
 * @throws IOException if the upgrade shutdown fails
 */
public synchronized DataNodeProperties stopDataNodeForUpgrade(int i)
    throws IOException {
  // Guard: out-of-range index means there is no datanode to stop.
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  final DataNodeProperties removed = dataNodes.remove(i);
  final DataNode dn = removed.datanode;
  // The pre-removal datanode count is the post-removal size plus one.
  final int originalCount = dataNodes.size() + 1;
  LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName()
      + " from a total of " + originalCount + " datanodes.");
  // true => shut down for upgrade rather than a plain stop.
  dn.shutdownDatanode(true);
  numDataNodes--;
  return removed;
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
@Override public Boolean get() {
  // Log the current RBW reservation, then report whether it has drained to 0.
  LOG.info("dn " + dn.getDisplayName() + " space : "
      + volume.getReservedForRbw());
  if (volume.getReservedForRbw() == 0) {
    return Boolean.TRUE;
  }
  return Boolean.FALSE;
}
}, 100, Integer.MAX_VALUE); // Wait until the test times out.
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
@Override
public Boolean get() {
  // Log the observed vs. expected RBW reservation, then compare them.
  LOG.info(
      "dn " + dn.getDisplayName() + " space : "
          + volume.getReservedForRbw() + ", Expected ReservedSpace :"
          + expectedReserved);
  if (volume.getReservedForRbw() == expectedReserved) {
    return Boolean.TRUE;
  }
  return Boolean.FALSE;
}
}, 100, 3000);
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Stops the datanode whose transfer address equals {@code dnName}.
 *
 * @param dnName the target datanode's transfer address
 * @return the stopped datanode's properties, or null if no match was found
 */
public synchronized DataNodeProperties stopDataNode(String dnName) {
  int match = -1;
  // Scan until the first datanode whose xfer address equals dnName;
  // each candidate is logged before the comparison, as before.
  for (int idx = 0; idx < dataNodes.size() && match < 0; idx++) {
    final DataNode dn = dataNodes.get(idx).datanode;
    LOG.info("DN name=" + dnName + " found DN=" + dn +
        " with name=" + dn.getDisplayName());
    if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
      match = idx;
    }
  }
  // Delegates to the index-based overload; -1 yields null there.
  return stopDataNode(match);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Logs each datanode's incremental block report operation count.
 *
 * @param datanodes datanodes whose IBR metrics should be logged
 */
static void logIbrCounts(List<DataNode> datanodes) {
  // Metric counter name for incremental block report operations.
  final String name = "IncrementalBlockReportsNumOps";
  for (int i = 0; i < datanodes.size(); i++) {
    final DataNode dn = datanodes.get(i);
    final MetricsRecordBuilder record =
        MetricsAsserts.getMetrics(dn.getMetrics().name());
    final long count = MetricsAsserts.getLongCounter(name, record);
    LOG.info(dn.getDisplayName() + ": " + name + "=" + count);
  }
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Starts one additional datanode, waits for the cluster to come up, and
 * optionally waits until the test file reaches the expected replication.
 *
 * @param filePath file whose replication to wait for
 * @param waitReplicas whether to block until REPL_FACTOR replicas exist
 */
private void startDNandWait(Path filePath, boolean waitReplicas)
    throws IOException, InterruptedException, TimeoutException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
  }
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitClusterUp();
  ArrayList<DataNode> datanodes = cluster.getDataNodes();
  // Fixed argument order: JUnit expects (expected, actual) so failure
  // messages report the values correctly.
  assertEquals(2, datanodes.size());
  if (LOG.isDebugEnabled()) {
    int lastDn = datanodes.size() - 1;
    LOG.debug("New datanode "
        + cluster.getDataNodes().get(lastDn).getDisplayName()
        + " has been started");
  }
  if (waitReplicas) {
    DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
  }
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
StringBuilder includeHosts = new StringBuilder();
for(DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
内容来源于网络,如有侵权,请联系作者删除!