This article collects code examples for the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.checkDiskErrorAsync() and shows how DataNode.checkDiskErrorAsync() is used in practice. The examples are taken from curated projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful real-world references. Details of the DataNode.checkDiskErrorAsync() method are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode
Class name: DataNode
Method name: checkDiskErrorAsync
Description: Check if there is a disk failure asynchronously and if so, handle the error.
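Before the examples, here is a minimal sketch of the pattern the method name describes: scheduling a disk check on a background thread so the calling I/O path is not blocked. This is an illustration only, not Hadoop's actual implementation (the real method delegates to the DataNode's internal volume checker, with throttling and failure handling); the names AsyncDiskChecker, checkDiskErrorAsync(Runnable), and handleFailure below are hypothetical.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch of the "check disk asynchronously, handle failures" idea.
// NOT Hadoop's implementation; all names here are illustrative.
class AsyncDiskChecker {
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  /** Schedule a disk check without blocking the calling (I/O) thread. */
  void checkDiskErrorAsync(Runnable volumeCheck) {
    executor.submit(() -> {
      try {
        volumeCheck.run();          // probe the volume's directories
      } catch (RuntimeException e) {
        handleFailure(e);           // e.g. mark the volume as failed
      }
    });
  }

  private void handleFailure(RuntimeException e) {
    System.err.println("Disk check failed: " + e.getMessage());
  }
}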
Code example source: org.apache.hadoop/hadoop-hdfs

private void onFailure(@Nullable FsVolumeSpi volume, long begin) {
  if (datanode != null && volume != null) {
    datanode.checkDiskErrorAsync(volume);
  }
  profilingEventHook.onFailure(volume, begin);
}
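Here the asynchronous check is scoped to the volume on which a file I/O operation just failed: when both the DataNode and the failing volume are known, a check of that specific volume is scheduled before the profiling hook is notified, so the failing I/O path itself is never blocked by the check.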
Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Find the file corresponding to the block and return it if it exists.
 */
ReplicaInfo validateBlockFile(String bpid, long blockId) {
  //Should we check for metadata file too?
  final ReplicaInfo r;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    r = volumeMap.get(bpid, blockId);
  }
  if (r != null) {
    if (r.blockDataExists()) {
      return r;
    }
    // if file is not null, but doesn't exist - possibly disk failed
    datanode.checkDiskErrorAsync(r.getVolume());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("blockId=" + blockId + ", replica=" + r);
  }
  return null;
}
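Note the signature difference between artifacts: the org.apache.hadoop/hadoop-hdfs examples above pass the suspect volume to checkDiskErrorAsync(FsVolumeSpi), while the ch.cern.hadoop and io.prestosql.hadoop examples below (both repackagings of Hadoop 2.x code) call a no-argument checkDiskErrorAsync() that schedules a check of the DataNode's volumes as a whole. Scoping the check to a single volume avoids re-scanning healthy disks on every I/O error.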
Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Find the file corresponding to the block and return it if it exists.
 */
File validateBlockFile(String bpid, long blockId) {
  //Should we check for metadata file too?
  final File f;
  synchronized (this) {
    f = getFile(bpid, blockId, false);
  }
  if (f != null) {
    if (f.exists()) {
      return f;
    }
    // if file is not null, but doesn't exist - possibly disk failed
    datanode.checkDiskErrorAsync();
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("blockId=" + blockId + ", f=" + f);
  }
  return null;
}
Code example source: io.prestosql.hadoop/hadoop-apache

/**
 * Find the file corresponding to the block and return it if it exists.
 */
File validateBlockFile(String bpid, long blockId) {
  //Should we check for metadata file too?
  final File f;
  synchronized (this) {
    f = getFile(bpid, blockId, false);
  }
  if (f != null) {
    if (f.exists()) {
      return f;
    }
    // if file is not null, but doesn't exist - possibly disk failed
    datanode.checkDiskErrorAsync();
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("blockId=" + blockId + ", f=" + f);
  }
  return null;
}
Code example source: ch.cern.hadoop/hadoop-hdfs
LOG.warn("Exception occured while compiling report: ", ioe);
datanode.checkDiskErrorAsync();
Code example source: io.prestosql.hadoop/hadoop-apache
LOG.warn("Exception occured while compiling report: ", ioe);
datanode.checkDiskErrorAsync();
Code example source: io.prestosql.hadoop/hadoop-apache
datanode.checkDiskErrorAsync();
throw ioe;
Code example source: ch.cern.hadoop/hadoop-hdfs
datanode.checkDiskErrorAsync();
throw ioe;
Code example source: ch.cern.hadoop/hadoop-hdfs

LOG.warn("IOException in BlockReceiver.run(): ", e);
if (running) {
  datanode.checkDiskErrorAsync();
  LOG.info(myString, e);
  running = false;
Code example source: ch.cern.hadoop/hadoop-hdfs

    targets[0] + " got ", ie);
checkDiskErrorAsync();
} finally {
  xmitsInProgress.getAndDecrement();
Code example source: io.prestosql.hadoop/hadoop-apache

    targets[0] + " got ", ie);
checkDiskErrorAsync();
} finally {
  xmitsInProgress.getAndDecrement();
Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Checks whether {@link DataNode#checkDiskErrorAsync()} is being called.
 * Before the code was refactored, the function above was not getting called.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testcheckDiskError() throws IOException, InterruptedException {
  if (cluster.getDataNodes().size() <= 0) {
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
  }
  DataNode dataNode = cluster.getDataNodes().get(0);
  long slackTime = dataNode.checkDiskErrorInterval / 2;
  // checking for disk error
  dataNode.checkDiskErrorAsync();
  Thread.sleep(dataNode.checkDiskErrorInterval);
  long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
  assertTrue("Disk Error check is not performed within "
          + dataNode.checkDiskErrorInterval + " ms",
      (Time.monotonicNow() - lastDiskErrorCheck)
          < (dataNode.checkDiskErrorInterval + slackTime));
}
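The test above waits a fixed interval for the asynchronous check to run; the ch.cern.hadoop snippets further down instead poll DataNode#getLastDiskErrorCheck() until its value advances. A small helper capturing that polling idiom, with a timeout added, might look like the following (waitForDiskCheck is a hypothetical name; it assumes the enclosing test class imports DataNode and uses the same getLastDiskErrorCheck()/checkDiskErrorAsync() calls seen in these examples):

// Hypothetical test helper: block until an async disk check completes,
// using the same polling idiom as the examples below.
static void waitForDiskCheck(DataNode dn, long timeoutMs)
    throws InterruptedException {
  long previous = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();                 // Hadoop 2.x-style no-arg call
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (dn.getLastDiskErrorCheck() == previous) {
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError(
          "disk check did not run within " + timeoutMs + " ms");
    }
    Thread.sleep(100);
  }
}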
Code example source: ch.cern.hadoop/hadoop-hdfs
datanode.checkDiskErrorAsync();
throw iex;
Code example source: ch.cern.hadoop/hadoop-hdfs
datanode.checkDiskErrorAsync();
Code example source: io.prestosql.hadoop/hadoop-apache

LOG.warn("IOException in BlockReceiver.run(): ", e);
if (running) {
  datanode.checkDiskErrorAsync();
  LOG.info(myString, e);
  running = false;
Code example source: ch.cern.hadoop/hadoop-hdfs
DataNode dn0 = cluster.getDataNodes().get(0);
long lastDiskErrorCheck = dn0.getLastDiskErrorCheck();
dn0.checkDiskErrorAsync();
Code example source: io.prestosql.hadoop/hadoop-apache
datanode.checkDiskErrorAsync();
throw iex;
Code example source: ch.cern.hadoop/hadoop-hdfs

dn.checkDiskErrorAsync();
while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
  Thread.sleep(100);
Code example source: io.prestosql.hadoop/hadoop-apache
datanode.checkDiskErrorAsync();
Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Test metrics associated with volume failures.
 */
@Test
public void testVolumeFailures() throws Exception {
  assertGauge("VolumeFailuresTotal", 0, getMetrics(NS_METRICS));
  assertGauge("EstimatedCapacityLostTotal", 0L, getMetrics(NS_METRICS));
  DataNode dn = cluster.getDataNodes().get(0);
  FsVolumeSpi fsVolume =
      DataNodeTestUtils.getFSDataset(dn).getVolumes().get(0);
  File dataDir = new File(fsVolume.getBasePath());
  long capacity = ((FsVolumeImpl) fsVolume).getCapacity();
  DataNodeTestUtils.injectDataDirFailure(dataDir);
  long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();
  while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
    Thread.sleep(100);
  }
  DataNodeTestUtils.triggerHeartbeat(dn);
  BlockManagerTestUtil.checkHeartbeat(bm);
  assertGauge("VolumeFailuresTotal", 1, getMetrics(NS_METRICS));
  assertGauge("EstimatedCapacityLostTotal", capacity, getMetrics(NS_METRICS));
}
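This test exercises the whole failure path shown piecemeal in the earlier fragments: a data-directory failure is injected, checkDiskErrorAsync() is triggered, the test polls getLastDiskErrorCheck() until the asynchronous check has run, and a heartbeat then propagates the volume failure so the VolumeFailuresTotal and EstimatedCapacityLostTotal gauges can be asserted.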