Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.getBlockScanner() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hdfs.server.datanode.DataNode.getBlockScanner() method and shows how it is used in practice. The examples were extracted from curated open-source projects found on GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of DataNode.getBlockScanner():

Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getBlockScanner

About DataNode.getBlockScanner

getBlockScanner() returns the DataNode's BlockScanner, the component that scans the block replicas stored on each volume in the background and verifies their checksums to detect on-disk corruption.
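As a quick orientation, here is a minimal usage sketch. It only relies on calls that appear in the examples below (getBlockScanner(), isEnabled(), printStats()); the surrounding helper class is hypothetical and not part of Hadoop:

import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical helper: summarizes the scanner state of a DataNode obtained
// elsewhere (for example from a MiniDFSCluster in a test).
public class BlockScannerProbe {
  public static String describe(DataNode datanode) {
    BlockScanner blockScanner = datanode.getBlockScanner();
    if (!blockScanner.isEnabled()) {
      // The scanner is disabled, e.g. when the scan period is configured as 0.
      return "Periodic block scanner is not running";
    }
    StringBuilder buffer = new StringBuilder();
    blockScanner.printStats(buffer);  // appends human-readable statistics
    return buffer.toString();
  }
}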

Code examples

Example source: org.apache.hadoop/hadoop-hdfs

@Override
public void doGet(HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  response.setContentType("text/plain");
  // The DataNode registers itself as a servlet context attribute, giving the
  // handler access to its BlockScanner.
  DataNode datanode = (DataNode)
      getServletContext().getAttribute("datanode");
  BlockScanner blockScanner = datanode.getBlockScanner();
  StringBuilder buffer = new StringBuilder(8 * 1024);
  if (!blockScanner.isEnabled()) {
    LOG.warn("Periodic block scanner is not running");
    buffer.append("Periodic block scanner is not running. " +
        "Please check the datanode log if this is unexpected.");
  } else {
    buffer.append("Block Scanner Statistics\n\n");
    blockScanner.printStats(buffer);
  }
  String resp = buffer.toString();
  LOG.trace("Returned Servlet info {}", resp);
  response.getWriter().write(resp);
}
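This servlet backs the block scanner report page of the DataNode web UI (typically served at /blockScannerReport on the DataNode HTTP port). Note the graceful handling of a disabled scanner: the page explains the situation instead of failing.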

Example source: org.apache.hadoop/hadoop-hdfs

// Exception handler inside BlockSender.sendChunks(): I/O errors other than
// client disconnects mark the block as suspect so the scanner re-verifies it.
String ioem = e.getMessage();
if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
  LOG.error("BlockSender.sendChunks() exception: ", e);
  datanode.getBlockScanner().markSuspectBlock(
      ris.getVolumeRef().getVolume().getStorageID(),
      block);
}
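markSuspectBlock() nudges the scanner to re-verify the given block ahead of its normal rotation, so a read error observed while serving a client quickly turns into a corruption check instead of waiting for the next full scan of the volume. Client-side disconnects ("Broken pipe", "Connection reset") are deliberately excluded, since they say nothing about the block's integrity.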

Example source: org.apache.hadoop/hadoop-hdfs

// FsDatasetImpl constructor fragment (first statement reconstructed from the
// Hadoop source): the FsVolumeList is wired up with the DataNode's BlockScanner.
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
    ReflectionUtils.newInstance(conf.getClass(
        DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
        RoundRobinVolumeChoosingPolicy.class,
        VolumeChoosingPolicy.class), conf);
volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
    blockChooserImpl);
asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
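Handing the BlockScanner to FsVolumeList here is what ties volume management to scanning: as volumes are added to or removed from the dataset, they can be registered with or detached from the scanner.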

Example sources: ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache

(Both repackaged artifacts contain the same doGet() servlet code as the org.apache.hadoop/hadoop-hdfs example above.)

Example sources: ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache

// Same BlockSender.sendChunks() handler as above, in an older variant where
// the volume reference is held directly in a local variable.
if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
  LOG.error("BlockSender.sendChunks() exception: ", e);
  datanode.getBlockScanner().markSuspectBlock(
      volumeRef.getVolume().getStorageID(),
      block);
}

Example source: ch.cern.hadoop/hadoop-hdfs

@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  this.conf = new Configuration();
  // A scan period of 0 hours produces a disabled BlockScanner.
  this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
  final DNConf dnConf = new DNConf(conf);
  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
  when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
  final ShortCircuitRegistry shortCircuitRegistry =
      new ShortCircuitRegistry(conf);
  when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }
  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}
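The test obtains a functional but disabled BlockScanner by setting the scan period to 0 hours, then stubs datanode.getBlockScanner() to return it; FsDatasetImpl needs a non-null scanner because it passes one into FsVolumeList (see the constructor fragment above).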

Example source: ch.cern.hadoop/hadoop-hdfs

@Test(timeout=60000)
public void testDisableVolumeScanner() throws Exception {
  Configuration conf = new Configuration();
  disableBlockScanner(conf);
  TestContext ctx = new TestContext(conf, 1);
  try {
    Assert.assertFalse(ctx.datanode.getBlockScanner().isEnabled());
  } finally {
    ctx.close();
  }
}
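disableBlockScanner(conf) is a helper defined elsewhere in the same test class; the assertion then confirms that isEnabled() reflects the disabled configuration.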

Example sources: ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache

(Both artifacts contain the same FsDatasetImpl constructor fragment as the org.apache.hadoop/hadoop-hdfs example above.)

Example source: ch.cern.hadoop/hadoop-hdfs

// Fragment from a TestBlockScanner case; the statement preceding the first
// line is truncated in the original snippet ("... initialFileLength + 32*1024);").
// The scanner is reconfigured, then a block is flagged as suspect.
BlockScanner.Conf newConf = new BlockScanner.Conf(conf);
ctx.datanode.getBlockScanner().setConf(newConf);
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
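BlockScanner.Conf captures the scanner settings derived from a Configuration, and setConf() swaps it in at runtime; the test uses this to adjust scanning behavior just before flagging a suspect block.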

Example source: ch.cern.hadoop/hadoop-hdfs

// Fragment from TestBlockScanner: the same block is marked suspect twice,
// then the test semaphore releases the scanner threads.
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
info.sem.release(10);
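Marking the same block suspect twice exercises the scanner's de-duplication: a block that is already queued as suspect (or was just scanned) is not scheduled again, so the duplicate call is expected to be a no-op.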

Example source: ch.cern.hadoop/hadoop-hdfs

TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).
      storagesPerDatanode(1);
  if (numNameServices > 1) {
    bld.nnTopology(MiniDFSNNTopology.
        simpleFederatedTopology(numNameServices));
  }
  cluster = bld.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    dfs[i] = cluster.getFileSystem(i);
  }
  bpids = new String[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int i = 0; i < numNameServices; i++) {
    dfs[i].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}
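TestContext is the harness used throughout these scanner tests: it starts a single-DataNode MiniDFSCluster, optionally federated across several nameservices, and caches the DataNode's BlockScanner along with the file systems and block pool ids the assertions need.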

Example source: ch.cern.hadoop/hadoop-hdfs

// Stops and detaches every per-volume scanner thread.
datanode.getBlockScanner().removeAllVolumeScanners();
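removeAllVolumeScanners() is typically invoked on shutdown paths, where the scanner must release its volume references before the dataset closes.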
