Usage and code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.createInterDataNodeProtocolProxy() method


This article collects a number of Java code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.createInterDataNodeProtocolProxy() method and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of DataNode.createInterDataNodeProtocolProxy() are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: createInterDataNodeProtocolProxy

DataNode.createInterDataNodeProtocolProxy overview

No official description is available. Judging from the examples below, the method creates a client-side RPC proxy implementing InterDatanodeProtocol for a remote DataNode identified by a DatanodeID, using the given Configuration, a socket timeout in milliseconds, and (in newer Hadoop versions) a flag that selects connecting by hostname rather than by IP address. Callers are expected to release the proxy with RPC.stopProxy() when they are done with it.
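
Before the harvested snippets, here is a minimal, self-contained sketch of the call pattern they all share: open the proxy, invoke InterDatanodeProtocol methods on it, then release it with RPC.stopProxy(). Only DataNode.createInterDataNodeProtocolProxy and RPC.stopProxy come from the examples themselves; the class name, helper method, timeout, and hostname-flag values below are illustrative assumptions, not part of the Hadoop API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.ipc.RPC;

public class InterDatanodeProxySketch {

  /**
   * Opens an InterDatanodeProtocol proxy to the DataNode identified by
   * targetDnId, uses it, and releases it. The timeout and hostname flag
   * are placeholder values; real callers read them from the DataNode's
   * DNConf (dnConf.socketTimeout, dnConf.connectToDnViaHostname).
   */
  static void withInterDatanodeProxy(DatanodeID targetDnId, Configuration conf)
      throws IOException {
    final int socketTimeoutMs = 60_000;            // assumed value
    final boolean connectToDnViaHostname = false;  // assumed value

    InterDatanodeProtocol proxy = null;
    try {
      proxy = DataNode.createInterDataNodeProtocolProxy(
          targetDnId, conf, socketTimeoutMs, connectToDnViaHostname);
      // ... call InterDatanodeProtocol methods here, e.g. initReplicaRecovery()
      //     during block recovery, as the snippets below do.
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy);  // always release the RPC proxy when done
      }
    }
  }
}

Note that the signature varies by artifact: the org.apache.hadoop/hadoop-hdfs versions shown below take four arguments (DatanodeID, Configuration, socket timeout, connect-via-hostname flag), while the older com.facebook.hadoop and org.jvnet.hudson variants accept only three or two.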

Code examples

Code example source (origin: org.apache.hadoop/hadoop-hdfs)

// Talk to the local datanode object directly when the target id is this node;
// otherwise open an RPC proxy to the remote DataNode.
DatanodeID bpReg = getDatanodeID(bpid);
InterDatanodeProtocol proxyDN = bpReg.equals(id) ?
    datanode : DataNode.createInterDataNodeProtocolProxy(id, conf,
        dnConf.socketTimeout, dnConf.connectToDnViaHostname);
ReplicaRecoveryInfo info = callInitReplicaRecovery(proxyDN, rBlock);

Code example source (origin: org.apache.hadoop/hadoop-hdfs)

DatanodeID bpReg = getDatanodeID(bpid);
InterDatanodeProtocol proxyDN = bpReg.equals(id) ?
    datanode : DataNode.createInterDataNodeProtocolProxy(id, conf,
        dnConf.socketTimeout, dnConf.connectToDnViaHostname);
ExtendedBlock internalBlk = new ExtendedBlock(block);

Code example source (origin: com.facebook.hadoop/hadoop-core)

// Callable that opens an InterDatanodeProtocol proxy to the target DataNode,
// asks it to copy a block from this node's local block file, and releases
// the proxy in the finally block.
public Boolean call() throws Exception {
  InterDatanodeProtocol remoteDatanode = null;
  try {
    File srcBlockFile = data.getBlockFile(srcNamespaceId, srcBlock);
    remoteDatanode = DataNode
        .createInterDataNodeProtocolProxy(target, getConf(), socketTimeout);
    remoteDatanode.copyBlockLocal(srcFileSystem, srcNamespaceId, srcBlock,
        dstNamespaceId, dstBlock, srcBlockFile.getAbsolutePath());
  } catch (IOException e) {
    LOG.warn("Cross datanode local block copy failed", e);
    throw e;
  } finally {
    if (remoteDatanode != null) {
      stopDatanodeProxy(remoteDatanode);
    }
  }
  return true;
}

Code example source (origin: ch.cern.hadoop/hadoop-hdfs)

public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
  DataNode dn, DatanodeID datanodeid, final Configuration conf,
  boolean connectToDnViaHostname) throws IOException {
 if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
  throw new AssertionError("Unexpected DN hostname configuration");
 }
 return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
   dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}

Code example source (origin: ch.cern.hadoop/hadoop-hdfs)

DatanodeRegistration bpReg = bpos.bpRegistration;
InterDatanodeProtocol datanode = bpReg.equals(id) ?
    this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
        dnConf.socketTimeout, dnConf.connectToDnViaHostname);
ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);

Code example source (origin: org.jvnet.hudson.hadoop/hadoop-core)

try {
  InterDatanodeProtocol datanode = dnRegistration.equals(id) ?
      this : DataNode.createInterDataNodeProtocolProxy(id, getConf());
  BlockMetaDataInfo info = datanode.getBlockMetaDataInfo(block);
  if (info != null && info.getGenerationStamp() >= block.getGenerationStamp()) {

Code example source (origin: io.fabric8/fabric-hadoop)

try {
  InterDatanodeProtocol datanode = dnRegistration.equals(id) ? this
      : DataNode.createInterDataNodeProtocolProxy(
          id, getConf(), socketTimeout, connectToDnViaHostname);
  BlockRecoveryInfo info = datanode.startBlockRecovery(block);

Code example source (origin: com.facebook.hadoop/hadoop-core)

    + getDNRegistrationForNS(namespaceId) + ") when recovering "
    + block);
datanode = DataNode.createInterDataNodeProtocolProxy(
    id, getConf(), socketTimeout);
datanodeProxies.add(datanode);

Code example source (origin: io.prestosql.hadoop/hadoop-apache)

DatanodeRegistration bpReg = bpos.bpRegistration;
InterDatanodeProtocol datanode = bpReg.equals(id) ?
    this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
        dnConf.socketTimeout, dnConf.connectToDnViaHostname);
ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);

Code example source (origin: org.apache.hadoop/hadoop-hdfs-test)

InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
  datanodeinfo[0], conf, datanode.socketTimeout);
assertTrue(datanode != null);

Code example source (origin: ch.cern.hadoop/hadoop-hdfs)

/** Test to verify that an InterDatanode RPC times out as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // A 500 ms socket timeout against a server that never responds should
    // surface as a SocketTimeoutException.
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected a SocketTimeoutException, but did not get one.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
