This article collects Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.getIpcPort() method and shows how DataNode.getIpcPort() is used in practice. The examples were extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the DataNode.getIpcPort() method:

Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: getIpcPort
Method description: none available
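Before the extracted examples, here is a minimal, self-contained sketch (written for this article, not taken from the projects below) that starts a single-DataNode MiniDFSCluster and reads the DataNode's IPC port via getIpcPort(); the class name GetIpcPortExample is hypothetical:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class GetIpcPortExample {
  public static void main(String[] args) throws Exception {
    // Start a one-DataNode test cluster (requires the hadoop-hdfs test jar).
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
      // getIpcPort() reports the port of the DataNode's IPC server; combined
      // with the node's IP it gives the address used for datanode RPC calls.
      InetSocketAddress ipcAddr = new InetSocketAddress(
          dn.getDatanodeId().getIpAddr(), dn.getIpcPort());
      System.out.println("DataNode IPC address: " + ipcAddr);
    } finally {
      cluster.shutdown();
    }
  }
}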
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (storageInfo == null) {
    // it's null in the case of SimulatedDataSet
    storageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }
  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(dnId, storageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
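In the restart example above, MiniDFSCluster keeps the restarted DataNode on its old IPC port by writing the previous port back into the configuration under DFS_DATANODE_IPC_ADDRESS_KEY. A minimal sketch of that configuration step follows; the class name and the fixed port 50020 are only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class PinIpcPortExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // dfs.datanode.ipc.address decides where the DataNode's IPC server binds;
    // with a fixed port configured here, getIpcPort() later reports that port.
    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:50020");
    System.out.println(conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY));
  }
}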
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Stores the information related to a namenode in the cluster
 */
public static class NameNodeInfo {
  final NameNode nameNode;
  final Configuration conf;
  final String nameserviceId;
  final String nnId;
  StartupOption startOpt;

  NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
      StartupOption startOpt, Configuration conf) {
    this.nameNode = nn;
    this.nameserviceId = nameserviceId;
    this.nnId = nnId;
    this.startOpt = startOpt;
    this.conf = conf;
  }

  public void setStartOpt(StartupOption startOpt) {
    this.startOpt = startOpt;
  }
}
Code example source: ch.cern.hadoop/hadoop-hdfs (the createBPRegistration snippet is identical to the org.apache.hadoop/hadoop-hdfs example above)
Code example source: io.prestosql.hadoop/hadoop-apache (again identical to the createBPRegistration example above)
Code example source: ch.cern.hadoop/hadoop-hdfs
any(Configuration.class))).thenReturn(changes);
final int port = datanode.getIpcPort();
final String address = "localhost:" + port;
Code example source: ch.cern.hadoop/hadoop-hdfs
@BeforeClass
public static void setUp() throws Exception {
  cluster = (new MiniDFSCluster.Builder(conf))
      .numDataNodes(1).build();
  nnAddress = cluster.getNameNode().getNameNodeAddress();
  DataNode dn = cluster.getDataNodes().get(0);
  dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
      dn.getIpcPort());
}
Code example source: ch.cern.hadoop/hadoop-hdfs
dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
dns[i - curDatanodesNum] = dn;
Code example source: ch.cern.hadoop/hadoop-hdfs
int indexToShutdown = 0;
for (int i = 0; i < dataNodes.size(); i++) {
  if (dataNodes.get(i).getIpcPort() == node.getIpcPort()) {
    indexToShutdown = i;
    break;
Code example source: ch.cern.hadoop/hadoop-hdfs
int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
Code example source: ch.cern.hadoop/hadoop-hdfs
String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
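The last fragment builds the <ip>:<ipc port> string that the dfsadmin -deleteBlockPool command expects as its datanode argument. The following sketch shows how such a call might be issued programmatically; the address, block pool id, and class name are hypothetical placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class DeleteBlockPoolExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Hypothetical datanode IPC address (IP + getIpcPort()) and block pool id.
    String dnIpcAddress = "127.0.0.1:50020";
    String blockPoolId = "BP-1234567890-127.0.0.1-1400000000000";
    int exitCode = ToolRunner.run(new DFSAdmin(conf),
        new String[] { "-deleteBlockPool", dnIpcAddress, blockPoolId });
    System.out.println("dfsadmin exit code: " + exitCode);
  }
}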
The content above was collected from the web; if it infringes your rights, please contact the author to have it removed.