This article collects code examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getXceiverCount() and shows how DataNode.getXceiverCount() is used in practice. The snippets are extracted from selected open-source projects published on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of DataNode.getXceiverCount():
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getXceiverCount
Description: Number of concurrent xceivers per node.
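
Before the collected snippets, here is a minimal, self-contained sketch of the usage pattern most of them follow: read the current xceiver count and reject new work once it exceeds a configured limit. The class name XceiverCountCheck and the MAX_XCEIVER_COUNT constant are illustrative assumptions for this sketch only; they are not part of the DataNode API.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class XceiverCountCheck {

  // Illustrative limit; real deployments configure dfs.datanode.max.transfer.threads.
  private static final int MAX_XCEIVER_COUNT = 4096;

  /** Throws if the DataNode already has too many active xceiver threads. */
  static void checkXceiverCount(DataNode datanode) throws IOException {
    int curXceiverCount = datanode.getXceiverCount();
    if (curXceiverCount > MAX_XCEIVER_COUNT) {
      throw new IOException("Xceiver count " + curXceiverCount
          + " exceeds the limit of concurrent xceivers: " + MAX_XCEIVER_COUNT);
    }
  }
}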
Code example from: org.apache.hadoop/hadoop-hdfs

private DataXceiver(Peer peer, DataNode datanode,
    DataXceiverServer dataXceiverServer) throws IOException {
  super(datanode.getTracer());
  this.peer = peer;
  this.dnConf = datanode.getDnConf();
  this.socketIn = peer.getInputStream();
  this.socketOut = peer.getOutputStream();
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
  this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
  remoteAddress = peer.getRemoteAddressString();
  final int colonIdx = remoteAddress.indexOf(':');
  remoteAddressWithoutPort =
      (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
  localAddress = peer.getLocalAddressString();
  // Log how many xceiver threads (active connections) the DataNode currently has.
  LOG.debug("Number of active connections is: {}",
      datanode.getXceiverCount());
}
Code example from: org.apache.hadoop/hadoop-hdfs

// Reject the incoming operation if the xceiver thread limit is exceeded.
int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > maxXceiverCount) {
  throw new IOException("Xceiver count " + curXceiverCount
      + " exceeds the limit of concurrent xceivers: " + maxXceiverCount);
}
Code example from: org.apache.hadoop/hadoop-hdfs

collectThreadLocalStates();
// Log the active connection count when the xceiver finishes its work.
LOG.debug("{}:Number of active connections is: {}",
    datanode.getDisplayName(), datanode.getXceiverCount());
updateCurrentThreadName("Cleaning up");
if (peer != null) {
Code example from: org.apache.hadoop/hadoop-hdfs

private void sendLifeline() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending lifeline with " + reports.length + " storage " +
        " reports from service actor: " + BPServiceActor.this);
  }
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  // The lifeline message includes the current xceiver count so the NameNode
  // can see how busy this DataNode is.
  lifelineNamenode.sendLifeline(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Code example from: org.jvnet.hudson.hadoop/hadoop-core

public DataXceiver(Socket s, DataNode datanode,
    DataXceiverServer dataXceiverServer) {
  this.s = s;
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  dataXceiverServer.childSockets.put(s, s);
  remoteAddress = s.getRemoteSocketAddress().toString();
  localAddress = s.getLocalSocketAddress().toString();
  LOG.debug("Number of active connections is: " + datanode.getXceiverCount());
}
Code example from: ch.cern.hadoop/hadoop-hdfs

/**
 * Returns the datanode's xceiver count, but subtracts 1, since the
 * DataXceiverServer counts as one.
 *
 * @return int xceiver count, not including DataXceiverServer
 */
private int getXceiverCountWithoutServer() {
  return dn.getXceiverCount() - 1;
}
Code example from: com.facebook.hadoop/hadoop-core

public DataXceiver(Socket s, DataNode datanode,
    DataXceiverServer dataXceiverServer) {
  this.s = s;
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  if (ClientTraceLog.isInfoEnabled()) {
    getAddresses();
    ClientTraceLog.info("Accepted DataXceiver connection: src "
        + remoteAddress + " dest " + localAddress + " XceiverCount: "
        + datanode.getXceiverCount());
  }
  dataXceiverServer.childSockets.put(s, s);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of active connections is: " + datanode.getXceiverCount());
  }
}
Code example from: org.apache.hadoop/hadoop-hdfs

// Part of the argument list passed to the NameNode heartbeat RPC:
// the xceiver count is reported alongside cache and volume statistics.
dn.getFSDataset().getCacheUsed(),
dn.getXmitsInProgress(),
dn.getXceiverCount(),
numFailedVolumes,
volumeFailureSummary,
Code example from: ch.cern.hadoop/hadoop-hdfs

private DataXceiver(Peer peer, DataNode datanode,
    DataXceiverServer dataXceiverServer) throws IOException {
  this.peer = peer;
  this.dnConf = datanode.getDnConf();
  this.socketIn = peer.getInputStream();
  this.socketOut = peer.getOutputStream();
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
  remoteAddress = peer.getRemoteAddressString();
  final int colonIdx = remoteAddress.indexOf(':');
  remoteAddressWithoutPort =
      (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
  localAddress = peer.getLocalAddressString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of active connections is: "
        + datanode.getXceiverCount());
  }
}
Code example from: io.prestosql.hadoop/hadoop-apache

private DataXceiver(Peer peer, DataNode datanode,
    DataXceiverServer dataXceiverServer) throws IOException {
  this.peer = peer;
  this.dnConf = datanode.getDnConf();
  this.socketIn = peer.getInputStream();
  this.socketOut = peer.getOutputStream();
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
  remoteAddress = peer.getRemoteAddressString();
  final int colonIdx = remoteAddress.indexOf(':');
  remoteAddressWithoutPort =
      (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
  localAddress = peer.getLocalAddressString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of active connections is: "
        + datanode.getXceiverCount());
  }
}
Code example from: com.facebook.hadoop/hadoop-core

data.getNSUsed(namespaceId),
xmitsInProgress.get(),
getXceiverCount());
this.lastBeingAlive = now();
LOG.debug("Sent heartbeat at " + this.lastBeingAlive);
Code example from: ch.cern.hadoop/hadoop-hdfs

int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > maxXceiverCount) {
  throw new IOException("Xceiver count " + curXceiverCount
      + " exceeds the limit of concurrent xceivers: " + maxXceiverCount);
}
Code example from: org.jvnet.hudson.hadoop/hadoop-core

byte op = in.readByte();
int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
  throw new IOException("xceiverCount " + curXceiverCount
} finally {
  LOG.debug(datanode.dnRegistration + ":Number of active connections is: "
      + datanode.getXceiverCount());
  IOUtils.closeStream(in);
  IOUtils.closeSocket(s);
Code example from: ch.cern.hadoop/hadoop-hdfs

if (LOG.isDebugEnabled()) {
  LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
      + datanode.getXceiverCount());
}
Code example from: ch.cern.hadoop/hadoop-hdfs

HeartbeatResponse sendHeartBeat() throws IOException {
  scheduler.scheduleNextHeartbeat();
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
        " storage reports from service actor: " + this);
  }
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  // The heartbeat reports the current xceiver count along with storage and
  // cache statistics.
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Code example from: com.facebook.hadoop/hadoop-core

int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
  throw new IOException("xceiverCount " + curXceiverCount
+ datanode.getXceiverCount());
updateCurrentThreadName("Cleaning up");
IOUtils.closeStream(in);
Code example from: ch.cern.hadoop/hadoop-hdfs

Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
} finally {
  if (cluster != null) { cluster.shutdown(); }
Code example from: io.prestosql.hadoop/hadoop-apache

int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > maxXceiverCount) {
  throw new IOException("Xceiver count " + curXceiverCount
      + " exceeds the limit of concurrent xceivers: " + maxXceiverCount);
}
Code example from: io.prestosql.hadoop/hadoop-apache

HeartbeatResponse sendHeartBeat() throws IOException {
  scheduler.scheduleNextHeartbeat();
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
        " storage reports from service actor: " + this);
  }
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Code example from: io.prestosql.hadoop/hadoop-apache

if (LOG.isDebugEnabled()) {
  LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
      + datanode.getXceiverCount());
}