This article collects a number of Java code examples for org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(), showing how DataNode.shutdown() is used in practice. The examples are drawn mainly from platforms such as GitHub, Stack Overflow, and Maven, extracted from selected projects, and should serve as useful references. Details of the DataNode.shutdown() method are as follows:
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: shutdown
Description: Shut down this instance of the datanode. Returns only after shutdown is complete. This method can only be called by the offerService thread. Otherwise, deadlock might occur.
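Before the collected snippets, here is a minimal, self-contained sketch of how the method is typically exercised in tests. It assumes a MiniDFSCluster-based setup similar to several of the snippets below; the class name DataNodeShutdownSketch is made up for illustration, and the exact MiniDFSCluster.Builder API shown here may differ slightly between Hadoop versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeShutdownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start an in-process mini cluster with a single DataNode
    // (requires the hadoop-hdfs test artifact on the classpath).
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
      // Shut down only this DataNode; the call returns after
      // the shutdown has fully completed.
      dn.shutdown();
    } finally {
      // The NameNode keeps running until the whole cluster is torn down.
      cluster.shutdown();
    }
  }
}

Note that shutting down a single DataNode this way does not stop the NameNode; that is what the outer cluster.shutdown() in the finally block is for.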
Code example origin: apache/hbase
LOG.info("killing datanode " + name + " / " + lookup);
ipcPort = dn.ipcServer.getListenerAddress().getPort();
dn.shutdown();
LOG.info("killed datanode " + name + " / " + lookup);
break;
Code example origin: org.apache.hadoop/hadoop-hdfs (the same snippet also appears verbatim in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)
  @Override public void run() {
    if (!shutdownForUpgrade) {
      // Delay the shutdown a bit if not doing for restart.
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) { }
    }
    shutdown();
  }
};
Code example origin: org.apache.hadoop/hadoop-hdfs
// Fragment from the DataNode constructor: if startup fails, shut down
// any partially started services before rethrowing the exception.
try {
  startDataNode(dataDirs, resources);
} catch (IOException ie) {
  shutdown();
  throw ie;
}
Code example origin: org.apache.hadoop/hadoop-hdfs-test (the same snippet also appears verbatim in ch.cern.hadoop/hadoop-hdfs)
/**
 * Stop the datanode.
 */
public void stopDataNode(DataNode dn) {
  if (dn != null) {
    dn.shutdown();
  }
}
Code example origin: io.fabric8/fabric-hadoop
@Override
protected void doDelete(DataNode service) throws Exception {
  service.shutdown();
}
Code example origin: griddynamics/jagger
public void shutdown() {
  if (dataNode != null) {
    dataNode.shutdown();
  } else {
    ready = true;
  }
}
Code example origin: org.apache.hadoop/hadoop-hdfs-test (the same snippet also appears verbatim in ch.cern.hadoop/hadoop-hdfs)
/**
 * Shutdown all DataNodes started by this class. The NameNode
 * is left running so that new DataNodes may be started.
 */
public void shutdownDataNodes() {
  for (int i = dataNodes.size() - 1; i >= 0; i--) {
    LOG.info("Shutting down DataNode " + i);
    DataNode dn = dataNodes.remove(i).datanode;
    dn.shutdown();
    numDataNodes--;
  }
}
Code example origin: com.facebook.hadoop/hadoop-core
/** Wait for the datanode to exit and clean up all its resources */
public void waitAndShutdown() {
  join();
  // make sure all other threads have exited even if
  // offerservice thread died abnormally
  shutdown();
}
Code example origin: org.apache.hadoop/hadoop-hdfs-test
public synchronized DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  System.out.println("MiniDFSCluster Stopping DataNode " +
      dn.dnRegistration.getName() +
      " from a total of " + (dataNodes.size() + 1) +
      " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
Code example origin: org.jvnet.hudson.hadoop/hadoop-core
/**
 * Create the DataNode given a configuration and an array of dataDirs.
 * 'dataDirs' is where the blocks are stored.
 */
DataNode(Configuration conf,
         AbstractList<File> dataDirs) throws IOException {
  super(conf);
  datanodeObject = this;
  try {
    startDataNode(conf, dataDirs);
  } catch (IOException ie) {
    shutdown();
    throw ie;
  }
}
Code example origin: org.apache.hadoop/hadoop-hdfs-test (the same snippet also appears verbatim in ch.cern.hadoop/hadoop-hdfs)
/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException) {
      return false;
    }
    throw e;
  } finally {
    if (dn != null) dn.shutdown();
  }
  return true;
}
Code example origin: org.apache.hadoop/hadoop-hdfs-test (the same snippet also appears verbatim in ch.cern.hadoop/hadoop-hdfs)
/**
 * Cleans the resources and closes the instance of datanode
 * @throws IOException if an error occurred
 */
@After
public void tearDown() throws IOException {
  if (dn != null) {
    try {
      dn.shutdown();
    } catch (Exception e) {
      LOG.error("Cannot close: ", e);
    } finally {
      File dir = new File(DATA_DIR);
      if (dir.exists()) {
        Assert.assertTrue(
            "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
      }
    }
  }
}
Code example origin: ch.cern.hadoop/hadoop-hdfs
public synchronized DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  LOG.info("MiniDFSCluster Stopping DataNode " +
      dn.getDisplayName() +
      " from a total of " + (dataNodes.size() + 1) +
      " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
Code example origin: ch.cern.hadoop/hadoop-hdfs
@Test
public void testSendOOBToPeers() throws Exception {
  DataNode dn = cluster.getDataNodes().get(0);
  DataXceiverServer spyXserver = Mockito.spy(dn.getXferServer());
  NullPointerException e = new NullPointerException();
  Mockito.doThrow(e).when(spyXserver).sendOOBToPeers();
  dn.xserver = spyXserver;
  try {
    dn.shutdown();
  } catch (Throwable t) {
    fail("DataNode shutdown should not have thrown exception " + t);
  }
}
The content above was collected from the internet; if anything infringes your rights, please contact the author to have it removed.