Usage of the org.apache.hadoop.hdfs.server.datanode.DataNode.getFSDataset() method, with code examples

x33g5p2x, reposted 2022-01-18, category: Other

This article compiles a number of Java code examples for the org.apache.hadoop.hdfs.server.datanode.DataNode.getFSDataset() method and demonstrates how it is used. The examples come from curated projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should be useful references. Details of DataNode.getFSDataset() are as follows:
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getFSDataset

Introduction to DataNode.getFSDataset

This method is used for testing. Examples are adding and deleting blocks directly. The most common usage will be when the data node's storage is simulated.
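
To illustrate the typical (test-only) usage described above, here is a minimal sketch that starts an in-process MiniDFSCluster, obtains a DataNode, and queries its dataset. The class name FsDatasetUsageSketch is made up for this illustration, and the sketch assumes the hadoop-hdfs test artifact (which provides MiniDFSCluster) is on the classpath:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class FsDatasetUsageSketch {
 public static void main(String[] args) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
   cluster.waitActive();
   DataNode dn = cluster.getDataNodes().get(0);
   // getFSDataset() returns the FsDatasetSpi that stores this node's blocks;
   // in tests it may be backed by a simulated (in-memory) implementation.
   FsDatasetSpi<?> dataset = dn.getFSDataset();
   System.out.println("DFS used: " + dataset.getDfsUsed());
   System.out.println("Cache capacity: " + dataset.getCacheCapacity());
  } finally {
   cluster.shutdown();
  }
 }
}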

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Report a bad block which is hosted on the local DN.
 */
public void reportBadBlocks(ExtendedBlock block) throws IOException{
 FsVolumeSpi volume = getFSDataset().getVolume(block);
 if (volume == null) {
  LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
  return;
 }
 reportBadBlocks(block, volume);
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Signal the current rolling upgrade status as indicated by the NN.
 * @param rollingUpgradeStatus rolling upgrade status
 */
void signalRollingUpgrade(RollingUpgradeStatus rollingUpgradeStatus)
  throws IOException {
 if (rollingUpgradeStatus == null) {
  return;
 }
 String bpid = getBlockPoolId();
 if (!rollingUpgradeStatus.isFinalized()) {
  dn.getFSDataset().enableTrash(bpid);
  dn.getFSDataset().setRollingUpgradeMarker(bpid);
 } else {
  dn.getFSDataset().clearTrash(bpid);
  dn.getFSDataset().clearRollingUpgradeMarker(bpid);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

private void reportBadBlock(final BPOfferService bpos,
  final ExtendedBlock block, final String msg) {
 FsVolumeSpi volume = getFSDataset().getVolume(block);
 if (volume == null) {
  LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
  return;
 }
 bpos.reportBadBlocks(
   block, volume.getStorageID(), volume.getStorageType());
 LOG.warn(msg);
}

Code example source: org.apache.hadoop/hadoop-hdfs

private void sendLifeline() throws IOException {
  StorageReport[] reports =
    dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
   LOG.debug("Sending lifeline with " + reports.length + " storage " +
        " reports from service actor: " + BPServiceActor.this);
  }
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
    .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
    volumeFailureSummary.getFailedStorageLocations().length : 0;
  lifelineNamenode.sendLifeline(bpRegistration,
                 reports,
                 dn.getFSDataset().getCacheCapacity(),
                 dn.getFSDataset().getCacheUsed(),
                 dn.getXmitsInProgress(),
                 dn.getXceiverCount(),
                 numFailedVolumes,
                 volumeFailureSummary);
 }

Code example source: org.apache.hadoop/hadoop-hdfs

FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
try {
 fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
 Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
 // ... iterate over the node's volumes (rest of the loop omitted in this snippet) ...
} finally {
 if (fsVolumeReferences != null) {
  fsVolumeReferences.close();
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

if (numOldDataDirs + getFSDataset().getNumFailedVolumes()
  + changedVolumes.newLocations.size()
  - changedVolumes.deactivateLocations.size() <= 0) {
 // no usable data directory would remain after this reconfiguration
 throw new IOException("Attempt to remove all volumes.");
}

Code example source: org.apache.hadoop/hadoop-hdfs

DatanodeCommand cacheReport() throws IOException {
 // If caching is disabled, do not send a cache report
 if (dn.getFSDataset().getCacheCapacity() == 0) {
  return null;
 }
 // send cache report if timer has expired.
 DatanodeCommand cmd = null;
 final long startTime = monotonicNow();
 if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Sending cacheReport from service actor: " + this);
  }
  lastCacheReport = startTime;
  String bpid = bpos.getBlockPoolId();
  List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
  long createTime = monotonicNow();
  cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);
  long sendTime = monotonicNow();
  long createCost = createTime - startTime;
  long sendCost = sendTime - createTime;
  dn.getMetrics().addCacheReport(sendCost);
  if (LOG.isDebugEnabled()) {
   LOG.debug("CacheReport of " + blockIds.size()
     + " block(s) took " + createCost + " msec to generate and "
     + sendCost + " msecs for RPC and NN processing");
  }
 }
 return cmd;
}

Code example source: org.apache.hadoop/hadoop-hdfs

if (getFSDataset().getNumFailedVolumes() > 0) {
 for (String failedStorageLocation : getFSDataset()
   .getVolumeFailureSummary().getFailedStorageLocations()) {
  boolean found = false;
  // ... check whether the failed location matches a configured volume (omitted in this snippet) ...
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
 public void run() {
  boolean succeeded = false;
  final FsDatasetImpl dataset = (FsDatasetImpl)datanode.getFSDataset();
  try (FsVolumeReference ref = this.targetVolume) {
   int smallBufferSize = DFSUtilClient.getSmallBufferSize(EMPTY_HDFS_CONF);
   FsVolumeImpl volume = (FsVolumeImpl)ref.getVolume();
   File[] targetFiles = volume.copyBlockToLazyPersistLocation(bpId,
     blockId, genStamp, replicaInfo, smallBufferSize, conf);
   // Lock FsDataSetImpl during onCompleteLazyPersist callback
   dataset.onCompleteLazyPersist(bpId, blockId,
       creationTime, targetFiles, volume);
   succeeded = true;
  } catch (Exception e){
   FsDatasetImpl.LOG.warn(
     "LazyWriter failed to async persist RamDisk block pool id: "
     + bpId + "block Id: " + blockId, e);
  } finally {
   if (!succeeded) {
    dataset.onFailLazyPersist(bpId, blockId);
   }
  }
 }

Code example source: org.apache.hadoop/hadoop-hdfs

private void notifyNamenodeBlock(ExtendedBlock block, BlockStatus status,
  String delHint, String storageUuid, boolean isOnTransientStorage) {
 checkBlock(block);
 final ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
   block.getLocalBlock(), status, delHint);
 final DatanodeStorage storage = dn.getFSDataset().getStorage(storageUuid);
 for (BPServiceActor actor : bpServices) {
  actor.getIbrManager().notifyNamenodeBlock(info, storage,
    isOnTransientStorage);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

scheduler.scheduleNextHeartbeat();
StorageReport[] reports =
  dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
if (LOG.isDebugEnabled()) {
 LOG.debug("Sending heartbeat with " + reports.length +
   " storage reports from service actor: " + this);
}
VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
  .getVolumeFailureSummary();
int numFailedVolumes = volumeFailureSummary != null ?
  volumeFailureSummary.getFailedStorageLocations().length : 0;
return bpNamenode.sendHeartbeat(bpRegistration,
  reports,
  dn.getFSDataset().getCacheCapacity(),
  dn.getFSDataset().getCacheUsed(),
  dn.getXmitsInProgress(),
  dn.getXceiverCount(),
  numFailedVolumes,
  volumeFailureSummary);
Code example source: org.apache.hadoop/hadoop-hdfs

this.datanode.getFSDataset().submitBackgroundSyncFileRangeRequest(
  block, streams, lastCacheManagementOffset,
  offsetInBlock - lastCacheManagementOffset,
  SYNC_FILE_RANGE_WRITE); // sync flag statically imported from NativeIO.POSIX

Code example source: org.apache.hadoop/hadoop-hdfs

Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
  dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());

Code example source: org.apache.hadoop/hadoop-hdfs

case DatanodeProtocol.DNA_INVALIDATE:
 // Some local block(s) are obsolete and can be safely garbage-collected.
 Block toDelete[] = bcmd.getBlocks();
 try {
  dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
 } catch(IOException e) {
  // Exceptions caught here are not expected to be disk-related.
  throw e;
 }
 break;
case DatanodeProtocol.DNA_CACHE:
 LOG.info("DatanodeCommand action: DNA_CACHE for " +
   blockIdCmd.getBlockPoolId() + " of [" +
    blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
 dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
 break;
case DatanodeProtocol.DNA_UNCACHE:
 LOG.info("DatanodeCommand action: DNA_UNCACHE for " +
   blockIdCmd.getBlockPoolId() + " of [" +
    blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
 dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
 break;
case DatanodeProtocol.DNA_SHUTDOWN:

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * This method is used for testing. 
 * Examples are adding and deleting blocks directly.
 * The most common usage will be when the data node's storage is simulated.
 * 
 * @return the fsdataset that stores the blocks
 */
public static FsDatasetSpi<?> getFSDataset(DataNode dn) {
 return dn.getFSDataset();
}
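
As a usage note, a test holding a DataNode reference can go through this static helper instead of calling the instance method directly. The following is a hypothetical call site (the enclosing utility class is not shown in the snippet above; Hadoop's test tree has a similar helper in DataNodeTestUtils):

// dn is a DataNode obtained from a running or mocked cluster;
// the helper simply delegates to dn.getFSDataset().
FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
long dfsUsed = dataset.getDfsUsed(); // may throw IOException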

Code example source: org.apache.hadoop/hadoop-hdfs-test

static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException {
 long total = 0;
 for(DataNode node : cluster.getDataNodes()) {
  total += node.getFSDataset().getDfsUsed();
 }
 return total;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

Mockito.doAnswer(new Answer<Void>() {
 @Override
 public Void answer(InvocationOnMock invocation) throws Throwable {
  if (count.getAndIncrement() == 0) {
   throw new IOException("faked initBlockPool exception");
  }
  // The initBlockPool is called again. Now mock init is done.
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
  return null;
 }
}).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));

Code example source: io.prestosql.hadoop/hadoop-apache

/**
 * Report a bad block which is hosted on the local DN.
 */
public void reportBadBlocks(ExtendedBlock block) throws IOException{
 FsVolumeSpi volume = getFSDataset().getVolume(block);
 if (volume == null) {
  LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
  return;
 }
 reportBadBlocks(block, volume);
}

Code example source: ch.cern.hadoop/hadoop-hdfs

private void notifyNamenodeBlock(ExtendedBlock block, BlockStatus status,
  String delHint, String storageUuid, boolean isOnTransientStorage) {
 checkBlock(block);
 final ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
   block.getLocalBlock(), status, delHint);
 final DatanodeStorage storage = dn.getFSDataset().getStorage(storageUuid);
 for (BPServiceActor actor : bpServices) {
  actor.getIbrManager().notifyNamenodeBlock(info, storage,
    isOnTransientStorage);
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

@Before
public void startCluster() throws IOException {
 conf = new HdfsConfiguration();
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
 singletonNn = cluster.getNameNode();
 singletonDn = cluster.getDataNodes().get(0);
 bpos = singletonDn.getAllBpOs()[0];
 actor = bpos.getBPServiceActors().get(0);
 storageUuid = singletonDn.getFSDataset().getVolumes().get(0).getStorageID();
}
