org.apache.hadoop.hdfs.server.datanode.DataNode.handleDiskError()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(3.0k)|赞(0)|评价(0)|浏览(109)

本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.handleDiskError()方法的一些代码示例,展示了DataNode.handleDiskError()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.handleDiskError()方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:handleDiskError

DataNode.handleDiskError介绍

暂无

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reacts to a set of volumes reported unhealthy: notifies the dataset
 * implementation, detaches the failed storage locations from this
 * DataNode, and finally invokes the disk-error handler so the failure
 * is reported (block report sent) upstream.
 */
private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
  if (unhealthyVolumes.isEmpty()) {
    LOG.debug("handleVolumeFailures done with empty unhealthyVolumes");
    return;
  }
  data.handleVolumeFailures(unhealthyVolumes);
  Set<StorageLocation> failedLocations =
      new HashSet<>(unhealthyVolumes.size());
  StringBuilder failureReport = new StringBuilder("DataNode failed volumes:");
  for (FsVolumeSpi volume : unhealthyVolumes) {
    StorageLocation location = volume.getStorageLocation();
    failedLocations.add(location);
    failureReport.append(location).append(";");
  }
  try {
    // Detach every failed volume from this DataNode.
    removeVolumes(failedLocations, false);
  } catch (IOException e) {
    LOG.warn("Error occurred when removing unhealthy storage dirs", e);
  }
  LOG.debug("{}", failureReport);
  // send blockreport regarding volume failure
  handleDiskError(failureReport.toString());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Check the disk error
 */
/**
 * Scans the data directories for disk errors. Any directories found
 * unhealthy are removed from service and the failure is reported via
 * {@code handleDiskError}.
 */
private void checkDiskError() {
  Set<File> failedDirs = data.checkDataDir();
  // Nothing to do when the scan found no unhealthy directories.
  if (failedDirs == null || failedDirs.isEmpty()) {
    return;
  }
  try {
    // Remove all unhealthy volumes from DataNode.
    removeVolumes(failedDirs, false);
  } catch (IOException e) {
    LOG.warn("Error occurred when removing unhealthy storage dirs: "
      + e.getMessage(), e);
  }
  StringBuilder failureReport = new StringBuilder("DataNode failed volumes:");
  for (File failedDir : failedDirs) {
    failureReport.append(failedDir.getAbsolutePath()).append(";");
  }
  handleDiskError(failureReport.toString());
}

代码示例来源:origin: io.fabric8/fabric-hadoop

/**
 *  Check if there is a disk failure and if so, handle the error
 *
 **/
/**
 * Probes the data directories for disk failures; when one is detected,
 * delegates to the error handler with the failure description.
 */
protected void checkDiskError( ) {
  try {
    data.checkDataDir();
  } catch (DiskErrorException failure) {
    handleDiskError(failure.getMessage());
  }
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * Check the disk error
 */
/**
 * Checks the data directories for disk errors; directories reported
 * unhealthy are dropped from this DataNode and the failure is passed
 * to {@code handleDiskError}.
 */
private void checkDiskError() {
  Set<File> badDirs = data.checkDataDir();
  if (badDirs != null && !badDirs.isEmpty()) {
    try {
      // Drop every failed volume from this DataNode.
      removeVolumes(badDirs, false);
    } catch (IOException e) {
      LOG.warn("Error occurred when removing unhealthy storage dirs: "
        + e.getMessage(), e);
    }
    StringBuilder report = new StringBuilder("DataNode failed volumes:");
    for (File badDir : badDirs) {
      report.append(badDir.getAbsolutePath()).append(";");
    }
    handleDiskError(report.toString());
  }
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Checks the data directories and routes any detected disk failure to
 * the disk-error handler; other I/O errors from the check propagate to
 * the caller.
 */
protected void checkDiskError( ) throws IOException {
  try {
    data.checkDataDir();
  } catch (DiskErrorException diskError) {
    handleDiskError(diskError.getMessage());
  }
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

timeLastCheckDisk = System.currentTimeMillis();
} catch(DiskErrorException de) {
 handleDiskError(de.getMessage());
} finally {
 checkingDisk.set(false);

相关文章

DataNode类方法