org.apache.hadoop.hdfs.server.datanode.DataNode.getXferAddress()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(5.2k)|赞(0)|评价(0)|浏览(104)

本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.getXferAddress()方法的一些代码示例,展示了DataNode.getXferAddress()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.getXferAddress()方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:getXferAddress

DataNode.getXferAddress介绍

[英]NB: The datanode can perform data transfer on the streaming address however clients are given the IPC IP address for data transfer, and that may be a different address.
[中]注意:datanode可以在流地址上执行数据传输,但是为客户端提供了用于数据传输的IPC IP地址,这可能是不同的地址。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

try {
 IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
  socketIn, datanode.getXferAddress().getPort(),
  datanode.getDatanodeId());
 input = new BufferedInputStream(saslStreams.in,

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Selects three distinct datanodes at random and returns their
 * streaming (data-transfer) addresses.
 *
 * @param rand source of randomness for the index selection
 * @return the transfer addresses of three different datanodes
 */
private InetSocketAddress[] getDatanodes(Random rand) {
 // Draw three mutually distinct indexes in [0, NUM_DATA_NODES).
 final int first = rand.nextInt(NUM_DATA_NODES);

 int second = rand.nextInt(NUM_DATA_NODES);
 while (second == first) {
  second = rand.nextInt(NUM_DATA_NODES);
 }

 int third = rand.nextInt(NUM_DATA_NODES);
 while (first == third || second == third) {
  third = rand.nextInt(NUM_DATA_NODES);
 }

 return new InetSocketAddress[] {
   datanodes.get(first).getXferAddress(),
   datanodes.get(second).getXferAddress(),
   datanodes.get(third).getXferAddress(),
 };
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Picks a random port on the local host that is not currently in use as
 * the streaming (xfer) port of any datanode in this cluster.
 *
 * @return a local-host socket address with a conflict-free random port
 * @throws UnknownHostException if the local host address cannot be resolved
 */
private InetSocketAddress getArbitraryLocalHostAddr() 
   throws UnknownHostException{
  Random rand = new Random(System.currentTimeMillis());
  // Bindable ports are 1..65535. The previous bound of nextInt(65535)
  // produced 0..65534, so port 0 (the "any port" wildcard) could be
  // returned and 65535 never could.
  int port = 1 + rand.nextInt(65535);
  boolean conflict = true;
  while (conflict) {
   conflict = false;
   for (DataNode d : datanodes) {
    if (d.getXferAddress().getPort() == port) {
     // Collision with a datanode's transfer port: draw a new
     // candidate and re-scan every datanode from the start.
     port = 1 + rand.nextInt(65535);
     conflict = true;
     break;
    }
   }
  }
  return new InetSocketAddress(InetAddress.getLocalHost(), port);
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Restart a datanode, on the same port if requested.
 * <p>
 * NOTE: the configuration is cloned into {@code newconf} <em>before</em>
 * the address keys are overridden below, so the saved properties keep the
 * original (pre-override) values while the restarted datanode itself is
 * created from {@code conf} with the pinned addresses.
 *
 * @param dnprop the datanode to restart (carries its config, CLI args and
 *               secure resources)
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
  boolean keepPort) throws IOException {
 Configuration conf = dnprop.conf;
 String[] args = dnprop.dnArgs;
 SecureResources secureResources = dnprop.secureResources;
 Configuration newconf = new HdfsConfiguration(conf); // save cloned config
 if (keepPort) {
  // Pin the restarted datanode to its previous streaming (xfer) address
  // and IPC port so existing references to the old address remain valid.
  InetSocketAddress addr = dnprop.datanode.getXferAddress();
  conf.set(DFS_DATANODE_ADDRESS_KEY, 
    addr.getAddress().getHostAddress() + ":" + addr.getPort());
  conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
    addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
 }
 DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
 dataNodes.add(new DataNodeProperties(
   newDn, newconf, args, secureResources, newDn.getIpcPort()));
 numDataNodes++;
 return true;
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Stores the information related to a namenode in the cluster: the
 * running instance plus the configuration and identifiers it was
 * started with.
 */
public static class NameNodeInfo {
 final NameNode nameNode;    // the running NameNode instance
 final Configuration conf;   // configuration the namenode was built from
 final String nameserviceId; // nameservice this namenode belongs to
 final String nnId;          // namenode id within the nameservice
 StartupOption startOpt;     // mutable: may be swapped between restarts

 NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
   StartupOption startOpt, Configuration conf) {
  this.conf = conf;
  this.startOpt = startOpt;
  this.nnId = nnId;
  this.nameserviceId = nameserviceId;
  this.nameNode = nn;
 }

 /** Replaces the startup option used for subsequent (re)starts. */
 public void setStartOpt(StartupOption startOpt) {
  this.startOpt = startOpt;
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

try {
 IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
  socketIn, datanode.getXferAddress().getPort(),
  datanode.getDatanodeId());
 input = new BufferedInputStream(saslStreams.in,

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

DataNode dn = dns.get(0);
String selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

final String[] hosts = new String[addrs.length];
for (int i = 0; i < addrs.length; i++) {
 addrs[i] = datanodes.get(i).getXferAddress();
 hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
if (racks != null) {
 int port = dn.getXferAddress().getPort();
 if (nodeGroups == null) {
  LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
 dnAddress = dnAddress.substring(1);

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

InetSocketAddress target = datanode.getXferAddress();
Socket s = new Socket(target.getAddress(), target.getPort());

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
 dnAddress = dnAddress.substring(1);

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

try {
 IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
  socketIn, datanode.getXferAddress().getPort(),
  datanode.getDatanodeId());
 input = new BufferedInputStream(saslStreams.in,

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
favoredNodes[i] = new InetSocketAddress(hosts[i], port);

相关文章

DataNode类方法