Usage and code examples for org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode()

This article collects Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode() method and shows how it is used. The snippets were extracted from selected projects published on GitHub, Stack Overflow, Maven and similar platforms, so they are reasonably representative references. The details of DataNode.createDataNode() are as follows:
Package path: org.apache.hadoop.hdfs.server.datanode.DataNode
Class name: DataNode
Method name: createDataNode

Introduction to DataNode.createDataNode

Instantiate & Start a single datanode daemon and wait for it to finish. If this thread is specifically interrupted, it will stop waiting.
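For orientation, here is a minimal, hypothetical launcher showing the typical call pattern, assuming a Configuration that already points at a running NameNode and at valid dfs.datanode.data.dir directories (the DataNodeLauncher class name is made up for this sketch). Note that in recent Hadoop releases the two-argument overload is annotated @VisibleForTesting, and normal deployments start the datanode through its main/secureMain entry points instead.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical launcher class, for illustration only.
public class DataNodeLauncher {
  public static void main(String[] args) throws IOException {
    // The configuration must point at the NameNode and at valid
    // dfs.datanode.data.dir directories for the daemon to come up.
    Configuration conf = new Configuration();

    // Instantiate and start a single datanode daemon; equivalent to calling
    // the three-argument overload with null SecureResources.
    DataNode datanode = DataNode.createDataNode(args, conf);

    if (datanode != null) {
      // Block until the datanode shuts down or this thread is interrupted.
      datanode.join();
    }
  }
}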

Code examples

Code example source: origin: org.apache.hadoop/hadoop-hdfs

/** Instantiate & Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
@VisibleForTesting
public static DataNode createDataNode(String args[],
                Configuration conf) throws IOException {
 return createDataNode(args, conf, null);
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs

public static void secureMain(String args[], SecureResources resources) {
 int errorCode = 0;
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null, resources);
  if (datanode != null) {
   datanode.join();
  } else {
   errorCode = 1;
  }
 } catch (Throwable e) {
  LOG.error("Exception in secureMain", e);
  terminate(1, e);
 } finally {
  // We need to terminate the process here because either shutdown was called
  // or some disk related conditions like volumes tolerated or volumes required
  // condition was not met. Also, In secure mode, control will go to Jsvc
  // and Datanode process hangs if it does not exit.
  LOG.warn("Exiting Datanode");
  terminate(errorCode);
 }
}

Code example source: origin: io.fabric8/fabric-hadoop

/** Instantiate & Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
public static DataNode createDataNode(String args[],
                Configuration conf) throws IOException {
 return createDataNode(args, conf, null);
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/** Instantiate & Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
@VisibleForTesting
public static DataNode createDataNode(String args[],
                Configuration conf) throws IOException {
 return createDataNode(args, conf, null);
}

Code example source: origin: io.fabric8/fabric-hadoop

@Override
protected DataNode doCreate(Dictionary properties) throws Exception {
  Configuration conf = new Configuration();
  for (Enumeration e = properties.keys(); e.hasMoreElements();) {
    Object key = e.nextElement();
    Object val = properties.get(key);
    conf.set( key.toString(), val.toString() );
  }
  DataNode dataNode = DataNode.createDataNode(null, conf);
  return dataNode;
}

Code example source: origin: io.prestosql.hadoop/hadoop-apache

/** Instantiate & Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
@VisibleForTesting
public static DataNode createDataNode(String args[],
                Configuration conf) throws IOException {
 return createDataNode(args, conf, null);
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Start the datanode.
 */
public DataNode startDataNode(int index, Configuration config) 
throws IOException {
 String dataDir = getTestingDir();
 File dataNodeDir = new File(dataDir, "data-" + index);
 config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
 String[] args = new String[] {};
 // NameNode will modify config with the ports it bound to
 return DataNode.createDataNode(args, config);
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
 DataNode dn = null;
 try {
  dn = DataNode.createDataNode(new String[]{}, conf);
 } catch(IOException e) {
  if (e instanceof java.net.BindException)
   return false;
  throw e;
 } finally {
  if(dn != null) dn.shutdown();
 }
 return true;
}

Code example source: origin: griddynamics/jagger

@Override
  public void run() {
    log.info("Starting DataNode...");
    while (!ready) {
      try {
        if (startupProperties != null) {
          dataNode = DataNode.createDataNode(null, HadoopUtils.toConfiguration(startupProperties));
          ready = true;
        } else Thread.sleep(10000);
      } catch (Exception e) {
        log.warn("Failed start DataNode: {}", e);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException e1) {
          log.warn("Interrupted");
          return;
        }
      }
    }
    /*new Thread() {
      public void run() {
        dataNode.run();
      }
    }.start();*/
    log.info("DataNode started");
  }
});

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Start the datanode.
 */
public DataNode startDataNode(int index, Configuration config) 
throws IOException {
 File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
 config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
 String[] args = new String[] {};
 // NameNode will modify config with the ports it bound to
 return DataNode.createDataNode(args, config);
}

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 */
public static void main(String args[]) {
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null);
  if (datanode != null)
   datanode.join();
 } catch (Throwable e) {
  LOG.error(StringUtils.stringifyException(e));
  System.exit(-1);
 }
}

Code example source: origin: io.fabric8/fabric-hadoop

public static void secureMain(String [] args, SecureResources resources) {
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null, resources);
  if (datanode != null)
   datanode.join();
 } catch (Throwable e) {
  LOG.error(StringUtils.stringifyException(e));
  System.exit(-1);
 } finally {
  // We need to add System.exit here because either shutdown was called or
  // some disk related conditions like volumes tolerated or volumes required
  // condition was not met. Also, In secure mode, control will go to Jsvc and
  // the process hangs without System.exit.
  LOG.info("Exiting Datanode");
  System.exit(0);
 }
}

Code example source: origin: com.facebook.hadoop/hadoop-core

/**
 */
public static void main(String args[]) {
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null);
  if (datanode != null) {
   datanode.waitAndShutdown();
  }
 } catch (Throwable e) {
  LOG.error(StringUtils.stringifyException(e));
  System.exit(-1);
 }
}

Code example source: origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
  boolean keepPort) throws IOException {
 Configuration conf = dnprop.conf;
 String[] args = dnprop.dnArgs;
 Configuration newconf = new HdfsConfiguration(conf); // save cloned config
 if (keepPort) {
  InetSocketAddress addr = dnprop.datanode.getSelfAddr();
  conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"
    + addr.getPort());
 }
 dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
   newconf, args));
 numDataNodes++;
 return true;
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
 DataNode dn = null;
 try {
  dn = DataNode.createDataNode(new String[]{}, conf);
 } catch(IOException e) {
  if (e instanceof java.net.BindException)
   return false;
  throw e;
 } finally {
  if(dn != null) dn.shutdown();
 }
 return true;
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

public static void secureMain(String args[], SecureResources resources) {
 int errorCode = 0;
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null, resources);
  if (datanode != null) {
   datanode.join();
  } else {
   errorCode = 1;
  }
 } catch (Throwable e) {
  LOG.fatal("Exception in secureMain", e);
  terminate(1, e);
 } finally {
  // We need to terminate the process here because either shutdown was called
  // or some disk related conditions like volumes tolerated or volumes required
  // condition was not met. Also, In secure mode, control will go to Jsvc
  // and Datanode process hangs if it does not exit.
  LOG.warn("Exiting Datanode");
  terminate(errorCode);
 }
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
  boolean keepPort) throws IOException {
 Configuration conf = dnprop.conf;
 String[] args = dnprop.dnArgs;
 SecureResources secureResources = dnprop.secureResources;
 Configuration newconf = new HdfsConfiguration(conf); // save cloned config
 if (keepPort) {
  InetSocketAddress addr = dnprop.datanode.getXferAddress();
  conf.set(DFS_DATANODE_ADDRESS_KEY, 
    addr.getAddress().getHostAddress() + ":" + addr.getPort());
  conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
    addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
 }
 DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
 dataNodes.add(new DataNodeProperties(
   newDn, newconf, args, secureResources, newDn.getIpcPort()));
 numDataNodes++;
 return true;
}
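Note how the address keys are overwritten on the original conf (so the restarted datanode reuses the old ports), while newconf, the clone taken before those overrides, is what gets stored in the new DataNodeProperties entry, keeping the saved configuration free of the pinned addresses.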

Code example source: origin: io.prestosql.hadoop/hadoop-apache

public static void secureMain(String args[], SecureResources resources) {
 int errorCode = 0;
 try {
  StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
  DataNode datanode = createDataNode(args, null, resources);
  if (datanode != null) {
   datanode.join();
  } else {
   errorCode = 1;
  }
 } catch (Throwable e) {
  LOG.fatal("Exception in secureMain", e);
  terminate(1, e);
 } finally {
  // We need to terminate the process here because either shutdown was called
  // or some disk related conditions like volumes tolerated or volumes required
  // condition was not met. Also, In secure mode, control will go to Jsvc
  // and Datanode process hangs if it does not exit.
  LOG.warn("Exiting Datanode");
  terminate(errorCode);
 }
}

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

// Truncated excerpt from a memlock-limit test: the datanode is started once
// with an acceptable configuration, then started again after the relevant
// setting has been raised (the omitted lines end with memlockLimit + 1).
dn = DataNode.createDataNode(new String[]{}, conf);
dn.shutdown();
dn = null;
// ... configuration update omitted in the original excerpt ...
try {
 dn = DataNode.createDataNode(new String[]{}, conf);
} catch (RuntimeException e) {
 GenericTestUtils.assertExceptionContains(
   // expected message and closing braces omitted in the original excerpt

Code example source: origin: ch.cern.hadoop/hadoop-hdfs

// Truncated excerpt: the datanode is expected to fail to start with this
// configuration, so reaching fail() would mean the test failed.
DataNode dn = null;
try {
 dn = DataNode.createDataNode(new String[]{}, conf);
 fail();
} catch (Exception e) {
 // exception handling and closing braces omitted in the original excerpt
