Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.getStorageLocations() method, with code examples


This article collects Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.getStorageLocations() method and illustrates how it is used in practice. The examples were extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. The details of DataNode.getStorageLocations() are as follows:

Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: getStorageLocations

Introduction to DataNode.getStorageLocations

The upstream Javadoc for this method is empty, but the examples below make its behavior clear: it is a public static method that parses the data directories configured under dfs.datanode.data.dir (DFS_DATANODE_DATA_DIR_KEY) into a List<StorageLocation>, honoring optional storage-type prefixes such as [DISK] or [SSD] and rejecting malformed values with an IllegalArgumentException.
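
A minimal, self-contained sketch of the basic call is shown below. It is illustrative rather than taken from any of the projects referenced here; the directory paths are hypothetical, and because StorageLocation's path accessors differ across releases (getFile() in Hadoop 2.x, getNormalizedUri() in 3.x), only getStorageType() and the default toString() are used.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

public class StorageLocationsDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();
  // Two hypothetical local volumes, comma-separated as HDFS expects.
  conf.set(DFS_DATANODE_DATA_DIR_KEY, "file:///data/dn1,file:///data/dn2");
  List<StorageLocation> locations = DataNode.getStorageLocations(conf);
  for (StorageLocation loc : locations) {
   // Each location carries a storage type (DISK by default) and a URI.
   System.out.println(loc.getStorageType() + " -> " + loc);
  }
 }
}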

Code Examples

Code example source: org.apache.hadoop/hadoop-hdfs

dataDirs = getStorageLocations(conf);

Code example source: org.apache.hadoop/hadoop-hdfs

Configuration conf = new Configuration();
conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
List<StorageLocation> newStorageLocations = getStorageLocations(conf);
for (StorageLocation loc : getStorageLocations(getConf())) {
 existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
}
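
This fragment is from the datanode's volume hot-swap (reconfiguration) path: the proposed dfs.datanode.data.dir value is parsed and compared against the locations currently in effect. Below is a hedged sketch of that comparison, assuming the Hadoop 3.x getNormalizedUri() accessor; the class and method names are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

class VolumeDiff {
 /** Returns the locations configured in newConf but not in oldConf. */
 static List<StorageLocation> addedVolumes(Configuration oldConf,
   Configuration newConf) {
  Map<String, StorageLocation> existing = new HashMap<>();
  for (StorageLocation loc : DataNode.getStorageLocations(oldConf)) {
   existing.put(loc.getNormalizedUri().toString(), loc);
  }
  List<StorageLocation> added = new ArrayList<>();
  for (StorageLocation loc : DataNode.getStorageLocations(newConf)) {
   // A location with no match in the old configuration is being added.
   if (existing.remove(loc.getNormalizedUri().toString()) == null) {
    added.add(loc);
   }
  }
  // Whatever is still left in "existing" was dropped by the new configuration.
  return added;
 }
}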

Code example source: org.apache.hadoop/hadoop-hdfs

/** Instantiate a single datanode object, along with its secure resources.
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently.
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
  SecureResources resources) throws IOException {
 if (conf == null)
  conf = new HdfsConfiguration();
 
 if (args != null) {
  // parse generic hadoop options
  GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
  args = hParser.getRemainingArgs();
 }
 
 if (!parseArguments(args, conf)) {
  printUsage(System.err);
  return null;
 }
 Collection<StorageLocation> dataLocations = getStorageLocations(conf);
 UserGroupInformation.setConfiguration(conf);
 SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
   DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, getHostName(conf));
 return makeInstance(dataLocations, conf, resources);
}
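
For context, here is a hedged sketch of how a caller drives this API; it mirrors what DataNode.createDataNode() does upstream, and the null SecureResources argument matches the non-secure startup path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class StartDataNode {
 public static void main(String[] args) throws Exception {
  Configuration conf = new HdfsConfiguration();
  // instantiateDataNode() parses dfs.datanode.data.dir via
  // getStorageLocations() and performs the Kerberos login (if configured)
  // before constructing the DataNode.
  DataNode dn = DataNode.instantiateDataNode(args, conf, null);
  if (dn != null) {
   dn.runDatanodeDaemon();  // start the datanode service threads
   dn.join();               // block until the datanode exits
  }
 }
}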

Code example source: org.apache.hadoop/hadoop-hdfs (the identical snippet also appears in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
  dataLocations, storage);

Code example source: io.prestosql.hadoop/hadoop-apache (the identical snippet also appears in ch.cern.hadoop/hadoop-hdfs)

private String formatThreadName() {
 Collection<StorageLocation> dataDirs =
   DataNode.getStorageLocations(dn.getConf());
 return "DataNode: [" + dataDirs.toString() + "] " +
  " heartbeating to " + nnAddr;
}

Code example source: linkedin/dynamometer

public SimulatedMultiStorageFSDataset(DataNode datanode, Configuration conf) {
 super(datanode, null, conf);
 this.datanode = datanode;
 int storageCount = DataNode.getStorageLocations(conf).size();
 this.datanodeUuid = "SimulatedDatanode-" + DataNode.generateUuid();
 this.storages = new ArrayList<>();
 for (int i = 0; i < storageCount; i++) {
  this.storages.add(new SimulatedStorage(
    conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
    conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE)));
 }
}
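
The simulated dataset above creates one SimulatedStorage per configured data directory, so its storage count is driven entirely by dfs.datanode.data.dir. A small hedged illustration of that relationship (the paths are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

public class StorageCountDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY,
    "file:///data/dn1,file:///data/dn2,file:///data/dn3");
  // SimulatedMultiStorageFSDataset would create one SimulatedStorage
  // for each of these three locations.
  System.out.println(DataNode.getStorageLocations(conf).size());  // 3
 }
}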

Code example source: ch.cern.hadoop/hadoop-hdfs (the identical snippet also appears in io.prestosql.hadoop/hadoop-apache)

Configuration conf = new Configuration();
conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
List<StorageLocation> locations = getStorageLocations(conf);

Code example source: ch.cern.hadoop/hadoop-hdfs (the identical snippet also appears in io.prestosql.hadoop/hadoop-apache)

conf.set(DFS_DATANODE_DATA_DIR_KEY,
  Joiner.on(",").join(effectiveVolumes));
dataDirs = getStorageLocations(conf);
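
The Joiner call above rebuilds the single comma-separated string that dfs.datanode.data.dir expects from a list of volumes. A hedged round-trip sketch using Guava's Joiner (the volume URIs are illustrative):

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Joiner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

public class JoinVolumesDemo {
 public static void main(String[] args) {
  List<String> effectiveVolumes =
    Arrays.asList("file:///data/dn1", "file:///data/dn2");
  Configuration conf = new Configuration();
  // Join the volumes back into the single comma-separated property value...
  conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(effectiveVolumes));
  // ...and parse it again: one StorageLocation per volume.
  List<StorageLocation> dataDirs = DataNode.getStorageLocations(conf);
  System.out.println(dataDirs.size());  // 2
 }
}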

Code example source: linkedin/dynamometer

// Fragment: in the original Dynamometer test this path expression is an
// argument to a larger call; a local variable is introduced here only to
// keep the excerpt well-formed.
String firstVolumePath =
  DataNode.getStorageLocations(getConf()).get(0).getFile().getAbsolutePath();
SimulatedMultiStorageFSDataset.setFactory(getConf());
getConf().setLong(SimulatedMultiStorageFSDataset.CONFIG_PROPERTY_CAPACITY,
  STORAGE_CAPACITY);

Code example source: ch.cern.hadoop/hadoop-hdfs

locations = DataNode.getStorageLocations(conf);
assertThat(locations.size(), is(5));
assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));

// A malformed dfs.datanode.data.dir value must be rejected.
conf.set(DFS_DATANODE_DATA_DIR_KEY, locations2);
try {
 locations = DataNode.getStorageLocations(conf);
 fail();
} catch (IllegalArgumentException iae) {
 // expected
}

// (The test reconfigures conf with a valid value before parsing again.)
locations = DataNode.getStorageLocations(conf);
assertThat(locations.size(), is(2));
assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
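
The storage-type assertions above depend on the optional type prefixes that dfs.datanode.data.dir accepts. A hedged, self-contained illustration (the paths are hypothetical; Hadoop recognizes tags such as [DISK], [SSD], [ARCHIVE], and [RAM_DISK], with untagged entries defaulting to DISK):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

public class StorageTypeDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY,
    "file:///data/disk0,[SSD]file:///data/ssd0,[ARCHIVE]file:///data/arch0");
  for (StorageLocation loc : DataNode.getStorageLocations(conf)) {
   System.out.println(loc.getStorageType());  // DISK, SSD, ARCHIVE
  }
 }
}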

Code example source: ch.cern.hadoop/hadoop-hdfs

assertThat(outputs.size(), is(8));  // a status header line + 3 (SUCCESS) + 4 (FAILED)
List<StorageLocation> locations = DataNode.getStorageLocations(
  datanode.getConf());
assertThat(locations.size(), is(1));

Code example source: io.prestosql.hadoop/hadoop-apache (the identical snippet also appears in ch.cern.hadoop/hadoop-hdfs)

/** Instantiate a single datanode object, along with its secure resources.
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently.
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
  SecureResources resources) throws IOException {
 if (conf == null)
  conf = new HdfsConfiguration();
 
 if (args != null) {
  // parse generic hadoop options
  GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
  args = hParser.getRemainingArgs();
 }
 
 if (!parseArguments(args, conf)) {
  printUsage(System.err);
  return null;
 }
 Collection<StorageLocation> dataLocations = getStorageLocations(conf);
 UserGroupInformation.setConfiguration(conf);
 SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
   DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
 return makeInstance(dataLocations, conf, resources);
}

Code example source: ch.cern.hadoop/hadoop-hdfs

assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
  dnDescriptor.getStorageInfos().length);
