Usage of org.apache.hadoop.hdfs.server.datanode.DataNode.getDnConf() with code examples


This article collects code examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getDnConf() and shows how DataNode.getDnConf() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, extracted from selected open-source projects, and should serve as a useful reference. Details of DataNode.getDnConf() are as follows:
Fully qualified class: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: getDnConf

About DataNode.getDnConf

No official description is available, but the examples below show the typical usage pattern.
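
In every excerpt below, the caller invokes getDnConf() once to obtain the DataNode's DNConf object and then reads individual settings from it. The following minimal sketch illustrates that pattern; the helper class ExampleDnConfUser is hypothetical, while getDnConf(), getSocketTimeout(), and getXceiverStopTimeout() are the accessors that appear in the real excerpts further down.

import org.apache.hadoop.hdfs.server.datanode.DNConf;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical helper that caches DataNode configuration values, mirroring
// how BlockRecoveryWorker, BPServiceActor, and DataXceiver use getDnConf().
class ExampleDnConfUser {
 private final DNConf dnConf;
 private final int socketTimeout;
 private final long xceiverStopTimeout;

 ExampleDnConfUser(DataNode datanode) {
  // getDnConf() returns the DataNode-side configuration bundle (DNConf).
  this.dnConf = datanode.getDnConf();
  // Individual settings are then read through DNConf accessors or fields.
  this.socketTimeout = dnConf.getSocketTimeout();
  this.xceiverStopTimeout = dnConf.getXceiverStopTimeout();
 }
}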

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

BlockRecoveryWorker(DataNode datanode) {
 this.datanode = datanode;
 conf = datanode.getConf();
 dnConf = datanode.getDnConf();
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Connect to the first item in the target list.  Pass along the 
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
  String[] targetStorageIds, ExtendedBlock b,
  BlockConstructionStage stage, final String clientname) {
 if (DataTransferProtocol.LOG.isDebugEnabled()) {
  DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
      "clientname={}, targets={}, target storage types={}, " +
      "target storage IDs={}", getClass().getSimpleName(), b,
    b.getNumBytes(), stage, clientname, Arrays.asList(targets),
    targetStorageTypes == null ? "[]" :
      Arrays.asList(targetStorageTypes),
    targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
 }
 this.targets = targets;
 this.targetStorageTypes = targetStorageTypes;
 this.targetStorageIds = targetStorageIds;
 this.b = b;
 this.stage = stage;
 BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
 bpReg = bpos.bpRegistration;
 this.clientname = clientname;
 this.cachingStrategy =
   new CachingStrategy(true, getDnConf().readaheadLength);
}

Code example source: org.apache.hadoop/hadoop-hdfs

public FsDatasetCache(FsDatasetImpl dataset) {
 this.dataset = dataset;
 this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
 ThreadFactory workerFactory = new ThreadFactoryBuilder()
   .setDaemon(true)

Code example source: org.apache.hadoop/hadoop-hdfs

InetSocketAddress getSocketAddress4Transfer(DatanodeInfo dnInfo) {
 return NetUtils.createSocketAddr(dnInfo.getXferAddr(
   datanode.getDnConf().getConnectToDnViaHostname()));
}

Code example source: org.apache.hadoop/hadoop-hdfs

private DataXceiver(Peer peer, DataNode datanode,
  DataXceiverServer dataXceiverServer) throws IOException {
 super(datanode.getTracer());
 this.peer = peer;
 this.dnConf = datanode.getDnConf();
 this.socketIn = peer.getInputStream();
 this.socketOut = peer.getOutputStream();
 this.datanode = datanode;
 this.dataXceiverServer = dataXceiverServer;
 this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
 this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
 this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
 remoteAddress = peer.getRemoteAddressString();
 final int colonIdx = remoteAddress.indexOf(':');
 remoteAddressWithoutPort =
   (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
 localAddress = peer.getLocalAddressString();
 LOG.debug("Number of active connections is: {}",
   datanode.getXceiverCount());
}

Code example source: org.apache.hadoop/hadoop-hdfs

BPServiceActor(InetSocketAddress nnAddr, InetSocketAddress lifelineNnAddr,
  BPOfferService bpos) {
 this.bpos = bpos;
 this.dn = bpos.getDataNode();
 this.nnAddr = nnAddr;
 this.lifelineSender = lifelineNnAddr != null ?
   new LifelineSender(lifelineNnAddr) : null;
 this.initialRegistrationComplete = lifelineNnAddr != null ?
   new CountDownLatch(1) : null;
 this.dnConf = dn.getDnConf();
 this.ibrManager = new IncrementalBlockReportManager(
   dnConf.ibrInterval,
   dn.getMetrics());
 prevBlockReportId = ThreadLocalRandom.current().nextLong();
 scheduler = new Scheduler(dnConf.heartBeatInterval,
   dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval,
   dnConf.outliersReportIntervalMs);
 // get the value of maxDataLength.
 this.maxDataLength = dnConf.getMaxDataLength();
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
  throws IOException {
 return initReplicaRecovery(rBlock.getBlock().getBlockPoolId(), volumeMap,
   rBlock.getBlock().getLocalBlock(), rBlock.getNewGenerationStamp(),
   datanode.getDnConf().getXceiverStopTimeout());
}

Code example source: org.apache.hadoop/hadoop-hdfs

socket = datanode.newSocket();
NetUtils.connect(socket, targetAddr,
  datanode.getDnConf().getSocketTimeout());
socket.setTcpNoDelay(
  datanode.getDnConf().getDataTransferServerTcpNoDelay());
socket.setSoTimeout(datanode.getDnConf().getSocketTimeout());
  // ... (non-contiguous excerpt: intervening lines omitted)
    new StorageType[]{storageType}, new String[]{storageId});
long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(socket);

Code example source: org.apache.hadoop/hadoop-hdfs

private Peer newConnectedPeer(ExtendedBlock b, InetSocketAddress addr,
               Token<BlockTokenIdentifier> blockToken,
               DatanodeID datanodeId)
  throws IOException {
 Peer peer = null;
 boolean success = false;
 Socket sock = null;
 final int socketTimeout = datanode.getDnConf().getSocketTimeout();
 try {
  sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
  NetUtils.connect(sock, addr, socketTimeout);
  peer = DFSUtilClient.peerFromSocketAndKey(datanode.getSaslClient(),
    sock, datanode.getDataEncryptionKeyFactoryForBlock(b),
    blockToken, datanodeId, socketTimeout);
  success = true;
  return peer;
 } finally {
  if (!success) {
   IOUtils.cleanup(null, peer);
   IOUtils.closeSocket(sock);
  }
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
try {

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public ReplicaHandler recoverRbw(
  ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
  throws IOException {
 LOG.info("Recover RBW replica " + b);
 while (true) {
  try {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo replicaInfo =
      getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check the replica's state
    if (replicaInfo.getState() != ReplicaState.RBW) {
     throw new ReplicaNotFoundException(
       ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
    }
    ReplicaInPipeline rbw = (ReplicaInPipeline)replicaInfo;
    if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
     throw new MustStopExistingWriter(rbw);
    }
    LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
    return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
   }
  } catch (MustStopExistingWriter e) {
   e.getReplicaInPipeline().stopWriter(
     datanode.getDnConf().getXceiverStopTimeout());
  }
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public Replica recoverClose(ExtendedBlock b, long newGS,
  long expectedBlockLen) throws IOException {
 LOG.info("Recover failed close " + b);
 while (true) {
  try {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check replica's state
    ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
    // bump the replica's GS
    replicaInfo.bumpReplicaGS(newGS);
    // finalize the replica if RBW
    if (replicaInfo.getState() == ReplicaState.RBW) {
     finalizeReplica(b.getBlockPoolId(), replicaInfo);
    }
    return replicaInfo;
   }
  } catch (MustStopExistingWriter e) {
   e.getReplicaInPipeline()
     .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
  }
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

this.isDatanode = clientname.length() == 0;
this.isClient = !this.isDatanode;
this.restartBudget = datanode.getDnConf().restartReplicaExpiry;
this.datanodeSlowLogThresholdMs =
  datanode.getDnConf().getSlowIoWarningThresholdMs();
final long readTimeout = datanode.getDnConf().socketTimeout;
this.responseInterval = (long) (readTimeout * 0.5);
 // ... (non-contiguous excerpt: intervening lines omitted)
  datanode.getDnConf().dropCacheBehindWrites :
  cachingStrategy.getDropBehind();
this.syncBehindWrites = datanode.getDnConf().syncBehindWrites;
this.syncBehindWritesInBackground = datanode.getDnConf().
  syncBehindWritesInBackground;

Code example source: org.apache.hadoop/hadoop-hdfs

volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
// ... (non-contiguous excerpt: intervening lines omitted)
  dataLocations, storage);
volsConfigured = datanode.getDnConf().getVolsConfigured();
int volsFailed = volumeFailureInfos.size();
// ... (non-contiguous excerpt: intervening lines omitted)
  datanode.getDnConf().getMaxLockedMemory() > 0) {
 lazyWriter = new Daemon(new LazyWriter(conf));
 lazyWriter.start();

Code example source: org.apache.hadoop/hadoop-hdfs

this.dropCacheBehindAllReads = false;
 this.dropCacheBehindLargeReads =
   datanode.getDnConf().dropCacheBehindReads;
} else {
 this.dropCacheBehindAllReads =
 // ... (non-contiguous excerpt: intervening lines omitted)
 this.readaheadLength = datanode.getDnConf().readaheadLength;
} else {
 this.alwaysReadahead = true;
 // ... (non-contiguous excerpt: intervening lines omitted)
this.transferToAllowed = datanode.getDnConf().transferToAllowed &&
 (!is32Bit || length <= Integer.MAX_VALUE);

Code example source: org.apache.hadoop/hadoop-hdfs

boolean isReplaceBlock) throws IOException {
syncOnClose = datanode.getDnConf().syncOnClose;
dirSyncOnFinalize = syncOnClose;
boolean responderClosed = false;
   long joinTimeout = datanode.getDnConf().getXceiverStopTimeout();
   joinTimeout = joinTimeout > 1  ? joinTimeout*8/10 : joinTimeout;
   responder.join(joinTimeout);

Code example source: org.apache.hadoop/hadoop-hdfs

.stopWriter(datanode.getDnConf().getXceiverStopTimeout());

Code example source: ch.cern.hadoop/hadoop-hdfs

public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
  DataNode dn, DatanodeID datanodeid, final Configuration conf,
  boolean connectToDnViaHostname) throws IOException {
 if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
  throw new AssertionError("Unexpected DN hostname configuration");
 }
 return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
   dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}

Code example source: org.apache.hadoop/hadoop-hdfs

throws IOException {
long startTimeMs = Time.monotonicNow();
long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
ReplicaInfo lastFoundReplicaInfo = null;
boolean isInPipeline = false;

Code example source: ch.cern.hadoop/hadoop-hdfs

BPServiceActor(InetSocketAddress nnAddr, BPOfferService bpos) {
 this.bpos = bpos;
 this.dn = bpos.getDataNode();
 this.nnAddr = nnAddr;
 this.dnConf = dn.getDnConf();
 this.ibrManager = new IncrementalBlockReportManager(dnConf.ibrInterval);
 scheduler = new Scheduler(dnConf.heartBeatInterval, dnConf.blockReportInterval);
}
