Usage of org.apache.hadoop.hdfs.server.datanode.DataNode.removeVolumes() with code examples

This article collects Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.removeVolumes() method and illustrates how it is used. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of DataNode.removeVolumes() are as follows:
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: removeVolumes

DataNode.removeVolumes overview

Remove volumes from DataNode. See #removeVolumes(Collection, boolean) for details.
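
The method is private to DataNode, so application code does not call it directly; it is invoked internally, for example when the set of data directories changes or when volumes fail, as the examples below show. As a minimal sketch of the input it expects, the following hypothetical snippet builds the Collection<StorageLocation> argument from directory paths via StorageLocation.parse; the class name and paths are made up for illustration.

// Hypothetical sketch: assembling the Collection<StorageLocation> that
// removeVolumes(Collection) expects. The directory paths are examples only.
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class RemoveVolumesInputSketch {
 public static void main(String[] args) throws IOException {
  Collection<StorageLocation> toRemove = new ArrayList<>();
  // Volumes that should no longer be served by this DataNode.
  for (String dir : new String[] {"/data/disk3", "/data/disk4"}) {
   toRemove.add(StorageLocation.parse(dir));
  }
  // Inside DataNode, removeVolumes(toRemove) delegates to
  // removeVolumes(toRemove, true) when the collection is non-empty.
  System.out.println("Locations to deactivate: " + toRemove);
 }
}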

Code examples

Code example source: origin: org.apache.hadoop/hadoop-hdfs

/**
 * Remove volumes from DataNode.
 * See {@link #removeVolumes(Collection, boolean)} for details.
 *
 * @param locations the StorageLocations of the volumes to be removed.
 * @throws IOException
 */
private void removeVolumes(final Collection<StorageLocation> locations)
 throws IOException {
 if (locations.isEmpty()) {
  return;
 }
 removeVolumes(locations, true);
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs

private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
 if (unhealthyVolumes.isEmpty()) {
  LOG.debug("handleVolumeFailures done with empty " +
    "unhealthyVolumes");
  return;
 }
 data.handleVolumeFailures(unhealthyVolumes);
 Set<StorageLocation> unhealthyLocations = new HashSet<>(
   unhealthyVolumes.size());
 StringBuilder sb = new StringBuilder("DataNode failed volumes:");
 for (FsVolumeSpi vol : unhealthyVolumes) {
  unhealthyLocations.add(vol.getStorageLocation());
  sb.append(vol.getStorageLocation()).append(";");
 }
 try {
  // Remove all unhealthy volumes from DataNode.
  removeVolumes(unhealthyLocations, false);
 } catch (IOException e) {
  LOG.warn("Error occurred when removing unhealthy storage dirs", e);
 }
 LOG.debug("{}", sb);
  // send blockreport regarding volume failure
 handleDiskError(sb.toString());
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs

try {
 removeVolumes(changedVolumes.deactivateLocations);
} catch (IOException e) {
 errorMessageBuilder.append(e.getMessage());
}
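
This fragment is an excerpt from the DataNode's volume-refresh handling: storage locations present in the old dfs.datanode.data.dir value but absent from the new one end up in changedVolumes.deactivateLocations and are then removed. The following is a rough, hypothetical sketch of that difference computation (not the DataNode's actual parsing code), comparing locations by URI:

// Hypothetical sketch: any location in the current configuration that is absent
// from the new configuration becomes a candidate for removeVolumes().
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class DeactivateLocationsSketch {
 static List<StorageLocation> deactivated(List<StorageLocation> current,
   List<StorageLocation> fromNewConf) {
  List<StorageLocation> result = new ArrayList<>();
  for (StorageLocation old : current) {
   boolean stillConfigured = false;
   for (StorageLocation fresh : fromNewConf) {
    if (fresh.getUri().equals(old.getUri())) {
     stillConfigured = true;
     break;
    }
   }
   if (!stillConfigured) {
    result.add(old); // volume dropped from the new configuration
   }
  }
  return result;
 }

 public static void main(String[] args) throws IOException {
  List<StorageLocation> current = Arrays.asList(
    StorageLocation.parse("/data/disk1"), StorageLocation.parse("/data/disk2"));
  List<StorageLocation> updated = Arrays.asList(StorageLocation.parse("/data/disk1"));
  System.out.println(deactivated(current, updated)); // only /data/disk2 is left to remove
 }
}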

Code example source: origin: io.prestosql.hadoop/hadoop-apache

/**
 * Remove volumes from DataNode.
 * See {@link #removeVolumes(Set, boolean)} for details.
 *
 * @param locations the StorageLocations of the volumes to be removed.
 * @throws IOException
 */
private void removeVolumes(final Collection<StorageLocation> locations)
 throws IOException {
 if (locations.isEmpty()) {
  return;
 }
 Set<File> volumesToRemove = new HashSet<>();
 for (StorageLocation loc : locations) {
  volumesToRemove.add(loc.getFile().getAbsoluteFile());
 }
 removeVolumes(volumesToRemove, true);
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove volumes from DataNode.
 * See {@link #removeVolumes(Set, boolean)} for details.
 *
 * @param locations the StorageLocations of the volumes to be removed.
 * @throws IOException
 */
private void removeVolumes(final Collection<StorageLocation> locations)
 throws IOException {
 if (locations.isEmpty()) {
  return;
 }
 Set<File> volumesToRemove = new HashSet<>();
 for (StorageLocation loc : locations) {
  volumesToRemove.add(loc.getFile().getAbsoluteFile());
 }
 removeVolumes(volumesToRemove, true);
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Check the disk error
 */
private void checkDiskError() {
 Set<File> unhealthyDataDirs = data.checkDataDir();
 if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
  try {
   // Remove all unhealthy volumes from DataNode.
   removeVolumes(unhealthyDataDirs, false);
  } catch (IOException e) {
   LOG.warn("Error occurred when removing unhealthy storage dirs: "
     + e.getMessage(), e);
  }
  StringBuilder sb = new StringBuilder("DataNode failed volumes:");
  for (File dataDir : unhealthyDataDirs) {
   sb.append(dataDir.getAbsolutePath() + ";");
  }
  handleDiskError(sb.toString());
 }
}

Code example source: origin: io.prestosql.hadoop/hadoop-apache

/**
 * Check the disk error
 */
private void checkDiskError() {
 Set<File> unhealthyDataDirs = data.checkDataDir();
 if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
  try {
   // Remove all unhealthy volumes from DataNode.
   removeVolumes(unhealthyDataDirs, false);
  } catch (IOException e) {
   LOG.warn("Error occurred when removing unhealthy storage dirs: "
     + e.getMessage(), e);
  }
  StringBuilder sb = new StringBuilder("DataNode failed volumes:");
  for (File dataDir : unhealthyDataDirs) {
   sb.append(dataDir.getAbsolutePath() + ";");
  }
  handleDiskError(sb.toString());
 }
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

try {
 removeVolumes(changedVolumes.deactivateLocations);
} catch (IOException e) {
 errorMessageBuilder.append(e.getMessage());
}

Code example source: origin: io.prestosql.hadoop/hadoop-apache

try {
 removeVolumes(changedVolumes.deactivateLocations);
} catch (IOException e) {
 errorMessageBuilder.append(e.getMessage());
}
