This article collects a number of Java code examples for the method org.apache.hadoop.hdfs.server.datanode.DataNode.getConf(), illustrating how DataNode.getConf() is used in practice. The examples are drawn mainly from platforms such as GitHub, Stack Overflow, and Maven, extracted from selected open-source projects, so they make a reasonably representative reference and should be of some help. Details of the DataNode.getConf() method are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: getConf
Method description: none provided; as the examples below show, getConf() returns the Configuration associated with the DataNode.
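Before the excerpts, here is a minimal sketch of the pattern they all share: obtain the DataNode's Configuration via getConf() and read a setting from it. The class name, method name, and the key "dfs.datanode.example.key" below are hypothetical, used only for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class GetConfExample {
  // Reads a (hypothetical) integer setting from the DataNode's Configuration.
  static int readExampleSetting(DataNode dn) {
    // getConf() returns the Configuration the DataNode was constructed with.
    Configuration conf = dn.getConf();
    // Read a value with a fallback default, the same pattern used in the excerpts below.
    return conf.getInt("dfs.datanode.example.key", 42);
  }
}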
Code example source: org.apache.hadoop/hadoop-hdfs
public BlockScanner(DataNode datanode) {
  this(datanode, datanode.getConf());
}
Code example source: org.apache.hadoop/hadoop-hdfs
@Override // DataNodeMXBean
public String getHttpPort() {
  return this.getConf().get("dfs.datanode.info.port");
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Get timeout value of each OOB type from configuration
 */
private void initOOBTimeout() {
  final int oobStart = Status.OOB_RESTART_VALUE; // the first OOB type
  final int oobEnd = Status.OOB_RESERVED3_VALUE; // the last OOB type
  final int numOobTypes = oobEnd - oobStart + 1;
  oobTimeouts = new long[numOobTypes];
  final String[] ele = getConf().get(DFS_DATANODE_OOB_TIMEOUT_KEY,
      DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
  for (int i = 0; i < numOobTypes; i++) {
    oobTimeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
  }
}
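For context, the OOB timeouts above come from a single comma-separated configuration value, one entry per OOB type. The sketch below shows how such a value could be set before constructing a DataNode; it assumes DFS_DATANODE_OOB_TIMEOUT_KEY is the DFSConfigKeys constant statically imported in the excerpt, and the timeout values are illustrative, not recommendations.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class OobTimeoutConfigSketch {
  static Configuration withOobTimeouts() {
    Configuration conf = new Configuration();
    // One entry per OOB type, in order; entries missing from the list
    // fall back to 0 in initOOBTimeout() above.
    conf.set(DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY, "1500,0,0,0");
    return conf;
  }
}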
Code example source: org.apache.hadoop/hadoop-hdfs
@Override // DataNodeMXBean
public String getRpcPort() {
  InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
      this.getConf().get(DFS_DATANODE_IPC_ADDRESS_KEY));
  return Integer.toString(ipcAddr.getPort());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Connect to the NN for the lifeline protocol. This is separated out for
 * easier testing.
 *
 * @param lifelineNnAddr address of lifeline RPC server
 * @return lifeline RPC proxy
 */
DatanodeLifelineProtocolClientSideTranslatorPB connectToLifelineNN(
    InetSocketAddress lifelineNnAddr) throws IOException {
  return new DatanodeLifelineProtocolClientSideTranslatorPB(lifelineNnAddr,
      getConf());
}
Code example source: org.apache.hadoop/hadoop-hdfs
@Override // DataNodeMXBean
public String getDataPort() {
  InetSocketAddress dataAddr = NetUtils.createSocketAddr(
      this.getConf().get(DFS_DATANODE_ADDRESS_KEY));
  return Integer.toString(dataAddr.getPort());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Connect to the NN. This is separated out for easier testing.
 */
DatanodeProtocolClientSideTranslatorPB connectToNN(
    InetSocketAddress nnAddr) throws IOException {
  return new DatanodeProtocolClientSideTranslatorPB(nnAddr, getConf());
}
Code example source: org.apache.hadoop/hadoop-hdfs
BlockRecoveryWorker(DataNode datanode) {
  this.datanode = datanode;
  conf = datanode.getConf();
  dnConf = datanode.getDnConf();
}
Code example source: org.apache.hadoop/hadoop-hdfs
IOStreamPair connectToDN(DatanodeInfo datanodeID, int timeout,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken)
    throws IOException {
  return DFSUtilClient.connectToDN(datanodeID, timeout, getConf(),
      saslClient, NetUtils.getDefaultSocketFactory(getConf()), false,
      getDataEncryptionKeyFactoryForBlock(block), blockToken);
}
Code example source: org.apache.hadoop/hadoop-hdfs
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
  if (storageType.isTransient()) {
    return null;
  }
  if (dataset.datanode == null) {
    // FsVolumeImpl is used in test.
    return null;
  }
  final int maxNumThreads = dataset.datanode.getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
      DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
  ThreadFactory workerFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
      .build();
  ThreadPoolExecutor executor = new ThreadPoolExecutor(
      1, maxNumThreads,
      60, TimeUnit.SECONDS,
      new LinkedBlockingQueue<Runnable>(),
      workerFactory);
  executor.allowCoreThreadTimeOut(true);
  return executor;
}
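The thread cap used by initializeCacheExecutor() above is read from the DataNode's Configuration via the DFSConfigKeys constant shown in the excerpt. A minimal sketch of tuning it; the helper class and method names are hypothetical:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class CacheExecutorConfigSketch {
  static Configuration withCacheThreadsPerVolume(int maxThreads) {
    Configuration conf = new Configuration();
    // Caps the pool size created by initializeCacheExecutor() for each non-transient volume.
    conf.setInt(
        DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
        maxThreads);
    return conf;
  }
}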
Code example source: org.apache.hadoop/hadoop-hdfs
this.deferredUncachingExecutor = new ScheduledThreadPoolExecutor(
    1, workerFactory);
this.revocationMs = dataset.datanode.getConf().getLong(
    DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
    DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS_DEFAULT);
long confRevocationPollingMs = dataset.datanode.getConf().getLong(
    DFS_DATANODE_CACHE_REVOCATION_POLLING_MS,
    DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT);
Code example source: org.apache.hadoop/hadoop-hdfs
@Override // ClientDatanodeProtocol
public void refreshNamenodes() throws IOException {
  checkSuperuserPrivilege();
  setConf(new Configuration());
  refreshNamenodes(getConf());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Start a timer to periodically write DataNode metrics to the log file. This
 * behavior can be disabled by configuration.
 */
protected void startMetricsLogger() {
  long metricsLoggerPeriodSec = getConf().getInt(
      DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
      DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT);
  if (metricsLoggerPeriodSec <= 0) {
    return;
  }
  MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG);
  // Schedule the periodic logging.
  metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
  metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
  metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG,
      "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
      TimeUnit.SECONDS);
}
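As the early return in startMetricsLogger() shows, periodic metrics logging is disabled whenever the configured period is zero or negative. A minimal sketch, assuming the period constant referenced in the excerpt is defined in DFSConfigKeys:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class MetricsLoggerConfigSketch {
  static Configuration withMetricsLoggerDisabled() {
    Configuration conf = new Configuration();
    // A period <= 0 makes startMetricsLogger() return before scheduling the logging task.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, 0);
    return conf;
  }
}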
Code example source: org.apache.hadoop/hadoop-hdfs
BlockChecksumComputer(DataNode datanode,
    ExtendedBlock block,
    BlockChecksumOptions blockChecksumOptions)
    throws IOException {
  super(datanode, blockChecksumOptions);
  this.block = block;
  this.requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  this.metadataIn = datanode.data.getMetaDataInputStream(block);
  this.visibleLength = datanode.data.getReplicaVisibleLength(block);
  this.partialBlk = requestLength < visibleLength;
  int ioFileBufferSize =
      DFSUtilClient.getIoFileBufferSize(datanode.getConf());
  this.checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, ioFileBufferSize));
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Add a list of volumes to be managed by DataStorage. If the volume is empty,
 * format it, otherwise recover it from previous transitions if required.
 *
 * @param datanode the reference to DataNode.
 * @param nsInfo namespace information
 * @param dataDirs array of data storage directories
 * @param startOpt startup option
 * @return a list of successfully loaded storage directories.
 */
@VisibleForTesting
synchronized List<StorageDirectory> addStorageLocations(DataNode datanode,
    NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
    StartupOption startOpt) throws IOException {
  final int numThreads = getParallelVolumeLoadThreadsNum(
      dataDirs.size(), datanode.getConf());
  final ExecutorService executor = Executors.newFixedThreadPool(numThreads);
  try {
    final List<StorageLocation> successLocations = loadDataStorage(
        datanode, nsInfo, dataDirs, startOpt, executor);
    return loadBlockPoolSliceStorage(
        datanode, nsInfo, successLocations, startOpt, executor);
  } finally {
    executor.shutdown();
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
private DataXceiver(Peer peer, DataNode datanode,
    DataXceiverServer dataXceiverServer) throws IOException {
  super(datanode.getTracer());
  this.peer = peer;
  this.dnConf = datanode.getDnConf();
  this.socketIn = peer.getInputStream();
  this.socketOut = peer.getOutputStream();
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
  this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
  remoteAddress = peer.getRemoteAddressString();
  final int colonIdx = remoteAddress.indexOf(':');
  remoteAddressWithoutPort =
      (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
  localAddress = peer.getLocalAddressString();
  LOG.debug("Number of active connections is: {}",
      datanode.getXceiverCount());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
 * for information on the different configuration options and how the
 * Http Policy is decided.
 */
private void startInfoServer()
    throws IOException {
  // SecureDataNodeStarter will bind the privileged port to the channel if
  // the DN is started by JSVC, pass it along.
  ServerSocketChannel httpServerChannel = secureResources != null ?
      secureResources.getHttpServerChannel() : null;
  httpServer = new DatanodeHttpServer(getConf(), this, httpServerChannel);
  httpServer.start();
  if (httpServer.getHttpAddress() != null) {
    infoPort = httpServer.getHttpAddress().getPort();
  }
  if (httpServer.getHttpsAddress() != null) {
    infoSecurePort = httpServer.getHttpsAddress().getPort();
  }
}
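The javadoc above points to DFSUtil#getHttpPolicy for how the HTTP policy behind the info server is resolved. A minimal sketch of selecting an HTTPS-only policy; the key "dfs.http.policy" and the value "HTTPS_ONLY" are stated from memory and should be treated as assumptions:
import org.apache.hadoop.conf.Configuration;

public class HttpPolicyConfigSketch {
  static Configuration withHttpsOnlyPolicy() {
    Configuration conf = new Configuration();
    // Assumption: "dfs.http.policy" accepts HTTP_ONLY, HTTPS_ONLY, or HTTP_AND_HTTPS.
    conf.set("dfs.http.policy", "HTTPS_ONLY");
    return conf;
  }
}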
Code example source: org.apache.hadoop/hadoop-hdfs
/** Start a single datanode daemon and wait for it to finish.
 * If this thread is specifically interrupted, it will stop waiting.
 */
public void runDatanodeDaemon() throws IOException {
  blockPoolManager.startAll();
  // start dataXceiveServer
  dataXceiverServer.start();
  if (localDataXceiverServer != null) {
    localDataXceiverServer.start();
  }
  ipcServer.setTracer(tracer);
  ipcServer.start();
  startPlugins(getConf());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Allows submission of a disk balancer Job.
 * @param planID - Hash value of the plan.
 * @param planVersion - Plan version, reserved for future use. We have only
 *                      version 1 now.
 * @param planFile - Plan file name
 * @param planData - Actual plan data in json format
 * @throws IOException
 */
@Override
public void submitDiskBalancerPlan(String planID, long planVersion,
    String planFile, String planData, boolean skipDateCheck)
    throws IOException {
  checkSuperuserPrivilege();
  if (getStartupOption(getConf()) != StartupOption.REGULAR) {
    throw new DiskBalancerException(
        "Datanode is in special state, e.g. Upgrade/Rollback etc."
            + " Disk balancing not permitted.",
        DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
  }
  getDiskBalancer().submitPlan(planID, planVersion, planFile, planData,
      skipDateCheck);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Creates a dummy DataNode for testing purpose.
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) throws DiskErrorException {
  super(conf);
  this.tracer = createTracer(conf);
  this.tracerConfigurationManager =
      new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
  this.fileIoProvider = new FileIoProvider(conf, this);
  this.fileDescriptorPassingDisabledReason = null;
  this.maxNumberOfBlocksToLog = 0;
  this.confVersion = null;
  this.usersWithLocalPathAccess = null;
  this.connectToDnViaHostname = false;
  this.blockScanner = new BlockScanner(this, this.getConf());
  this.pipelineSupportECN = false;
  this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
  this.dnConf = new DNConf(this);
  initOOBTimeout();
  storageLocationChecker = null;
  volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}