Usage of the org.apache.hadoop.hdfs.protocol.Block.getBlockId() method, with code examples

This article collects Java code examples of the org.apache.hadoop.hdfs.protocol.Block.getBlockId() method, showing how Block.getBlockId() is used in practice. The examples were extracted from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Block.getBlockId() are as follows:
Package path: org.apache.hadoop.hdfs.protocol.Block
Class name: Block
Method name: getBlockId

About Block.getBlockId

Returns this block's numeric ID: a long value that identifies the block within its block pool.

Code examples

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override
 public int compare(Object o1, Object o2) {
  long lookup = (long) o1;
  long stored = ((Block) o2).getBlockId();
  return lookup > stored ? 1 : lookup < stored ? -1 : 0;
 }
};
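
For context: the raw Object signature indicates this comparator is meant for a lookup structure that passes a bare long block ID as the first argument and a stored Block as the second, avoiding a wrapper allocation per lookup. As a standalone illustration of ordering and searching blocks by ID (class name and IDs are hypothetical; java.util collections compare two elements of the same type, so a typed comparator is used instead):

import java.util.Arrays;
import java.util.Comparator;
import org.apache.hadoop.hdfs.protocol.Block;

public class BlockIdOrderSketch {
  public static void main(String[] args) {
    Block[] blocks = { new Block(1042L), new Block(1001L), new Block(1005L) };
    // Order blocks by their numeric ID, mirroring the comparison above.
    Arrays.sort(blocks, Comparator.comparingLong(Block::getBlockId));
    // Search by wrapping the bare ID in a lightweight key Block.
    int idx = Arrays.binarySearch(blocks, new Block(1005L),
        Comparator.comparingLong(Block::getBlockId));
    System.out.println(idx); // prints 1
  }
}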

Code example origin: org.apache.hadoop/hadoop-hdfs

public static byte getBlockIndex(Block reportedBlock) {
 return (byte) (reportedBlock.getBlockId() &
   HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
}
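
In HDFS erasure coding, the internal blocks of a striped block group share a common base ID, and the low bits of getBlockId() encode the block's position within the group. A minimal illustration, assuming the mask value 0xF (i.e. MAX_BLOCKS_IN_GROUP = 16) used by current Hadoop releases:

// Hypothetical IDs: a block-group base ID has its low 4 bits clear,
// and internal block i has ID baseId + i.
long baseId = -9223372036854775792L;    // a negative EC block-group base ID
long internalId = baseId + 3;           // the fourth internal block in the group
byte index = (byte) (internalId & 0xF); // 0xF standing in for BLOCK_GROUP_INDEX_MASK
System.out.println(index);              // prints 3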

Code example origin: org.apache.hadoop/hadoop-hdfs

void addBlockPinningFailures(PendingMove pendingBlock) {
 synchronized (blockPinningFailures) {
  long blockId = pendingBlock.reportedBlock.getBlock().getBlockId();
  Set<DatanodeInfo> pinnedLocations = blockPinningFailures.get(blockId);
  if (pinnedLocations == null) {
   pinnedLocations = new HashSet<>();
   blockPinningFailures.put(blockId, pinnedLocations);
  }
  pinnedLocations.add(pendingBlock.getSource());
 }
}
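
A side note: this null-check-then-put pattern predates Map.computeIfAbsent. On Java 8+ the body could be written equivalently as the sketch below, keeping the surrounding synchronized block unchanged:

long blockId = pendingBlock.reportedBlock.getBlock().getBlockId();
blockPinningFailures
  .computeIfAbsent(blockId, k -> new HashSet<>())
  .add(pendingBlock.getSource());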

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override
boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
 Preconditions.checkArgument(this.getBlockId() == reportedBlock.getBlockId(),
   "reported blk_%s is different from stored blk_%s",
   reportedBlock.getBlockId(), this.getBlockId());
 // find the last null node
 int lastNode = ensureCapacity(1);
 setStorageInfo(lastNode, storage);
 return true;
}

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override
public void store(FileRegion token) throws IOException {
 final Block block = token.getBlock();
 final ProvidedStorageLocation psl = token.getProvidedStorageLocation();
 out.append(String.valueOf(block.getBlockId())).append(delim);
 out.append(psl.getPath().toString()).append(delim);
 out.append(Long.toString(psl.getOffset())).append(delim);
 out.append(Long.toString(psl.getLength())).append(delim);
 out.append(Long.toString(block.getGenerationStamp()));
 if (psl.getNonce().length > 0) {
  out.append(delim)
    .append(Base64.getEncoder().encodeToString(psl.getNonce()));
 }
 out.append("\n");
}
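
The writer emits one delimiter-separated record per block: block ID, path, offset, length, generation stamp, and an optional Base64-encoded nonce. Assuming delim is "," and purely hypothetical field values, a stored line would look like:

1073741825,/data/remote/file1,0,134217728,1001,bm9uY2U=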

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Return true if the block is a striped block.
 *
 * Before HDFS-4645, block IDs were randomly generated (legacy), so a
 * legacy block ID may be negative, which should not be considered a
 * striped block ID.
 *
 * @see #isLegacyBlock(Block) detecting legacy block IDs.
 */
public boolean isStripedBlock(Block block) {
 return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
}
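
For reference, isStripedBlockID is essentially a sign check: striped (erasure-coded) block IDs are allocated from the negative half of the long range, as the block-group ID generator in the final example on this page shows. A simplified sketch of the test, based on current Hadoop sources (treat it as an assumption, not the exact implementation):

static boolean isStripedBlockID(long id) {
  // Sign bit set => the ID comes from the EC block-group range.
  return id < 0;
}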

Code example origin: org.apache.hadoop/hadoop-hdfs

public static void blockToXml(ContentHandler contentHandler, Block block) 
  throws SAXException {
 contentHandler.startElement("", "", "BLOCK", new AttributesImpl());
 XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
   Long.toString(block.getBlockId()));
 XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
   Long.toString(block.getNumBytes()));
 XMLUtils.addSaxString(contentHandler, "GENSTAMP",
   Long.toString(block.getGenerationStamp()));
 contentHandler.endElement("", "", "BLOCK");
}
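
For a block with hypothetical values (ID 1073741825, 134217728 bytes, generation stamp 1001), the XML emitted by this method would look roughly like:

<BLOCK>
  <BLOCK_ID>1073741825</BLOCK_ID>
  <NUM_BYTES>134217728</NUM_BYTES>
  <GENSTAMP>1001</GENSTAMP>
</BLOCK>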

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
LocalReplicaInPipeline(Block block,
  FsVolumeSpi vol, File dir, Thread writer) {
 this(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
   vol, dir, writer, 0L);
}

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Constructor.
 * @param block a block
 * @param vol volume where replica is located
 */
ReplicaInfo(Block block, FsVolumeSpi vol) {
 this(vol, block.getBlockId(), block.getNumBytes(),
   block.getGenerationStamp());
}

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 */
LocalReplica(Block block, FsVolumeSpi vol, File dir) {
 this(block.getBlockId(), block.getNumBytes(),
   block.getGenerationStamp(), vol, dir);
}
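
The three replica constructors above all unpack the same trio of fields from the Block argument. A quick sketch with hypothetical values:

Block b = new Block(1073741825L, 134217728L, 1001L); // (id, numBytes, genStamp)
assert b.getBlockId() == 1073741825L;
assert b.getNumBytes() == 134217728L;
assert b.getGenerationStamp() == 1001L;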

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
  * @param b A block object whose ID is set to the starting point for the check
  * @return true if any ID in the range
  *      {id, id+HdfsConstants.MAX_BLOCKS_IN_GROUP} is pointed-to by a stored
  *      block.
  */
 private boolean hasValidBlockInRange(Block b) {
  final long id = b.getBlockId();
  for (int i = 0; i < MAX_BLOCKS_IN_GROUP; i++) {
   b.setBlockId(id + i);
   if (blockManager.getStoredBlock(b) != null) {
    return true;
   }
  }
  return false;
 }
}

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
 Block b = new Block(super.nextValue());
 // There may be an occasional conflict with randomly generated
 // block IDs. Skip over the conflicts.
 while(isValidBlock(b)) {
  b.setBlockId(super.nextValue());
 }
 if (b.getBlockId() < 0) {
  throw new IllegalStateException("All positive block IDs are used, " +
    "wrapping to negative IDs, " +
    "which might conflict with erasure coded block groups.");
 }
 return b.getBlockId();
}

Code example origin: org.apache.hadoop/hadoop-hdfs

public BlockInfo getStoredBlock(Block block) {
 if (!BlockIdManager.isStripedBlockID(block.getBlockId())) {
  return blocksMap.getStoredBlock(block);
 }
 if (!hasNonEcBlockUsingStripedID) {
  return blocksMap.getStoredBlock(
    new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
 }
 BlockInfo info = blocksMap.getStoredBlock(block);
 if (info != null) {
  return info;
 }
 return blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
}
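
The lookup order hinges on convertToStripedID, which maps any internal block ID back to its block group's base ID by clearing the group-index bits: if the ID looks striped and no legacy blocks use striped IDs, the map is probed directly with the group base ID; otherwise the exact ID is tried first. A sketch of the conversion, assuming the 0xF mask used in current Hadoop releases:

// Equivalent of BlockIdManager.convertToStripedID under that assumption:
static long convertToStripedID(long id) {
  return id & ~0xFL; // clear the bits covered by BLOCK_GROUP_INDEX_MASK
}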

Code example origin: org.apache.hadoop/hadoop-hdfs

File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
 File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
 fileIoProvider.mkdirsWithExistsCheck(volume, blockDir);
 File blockFile = FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
 File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
 if (dfsUsage instanceof CachingGetSpaceUsed) {
  ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
    b.getNumBytes() + metaFile.length());
 }
 return blockFile;
}
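
Here idToBlockDir hashes the block ID into a two-level subdirectory layout under the finalized directory, so no single directory accumulates an unbounded number of block files. A sketch of the layout computation, modeled on DatanodeUtil in current Hadoop releases (treat the exact bit positions as an assumption):

int d1 = (int) ((blockId >> 16) & 0x1F);
int d2 = (int) ((blockId >> 8) & 0x1F);
// e.g. finalized/subdir17/subdir5/blk_<blockId>
File blockDir = new File(finalizedDir, "subdir" + d1 + File.separator + "subdir" + d2);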

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public boolean contains(final ExtendedBlock block) {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  final long blockId = block.getLocalBlock().getBlockId();
  final String bpid = block.getBlockPoolId();
  final ReplicaInfo r = volumeMap.get(bpid, blockId);
  return (r != null && r.blockDataExists());
 }
}

Code example origin: org.apache.hadoop/hadoop-hdfs

/**
 * Get the meta information of the replica that matches both block id 
 * and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
 checkBlockPool(bpid);
 checkBlock(block);
 ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
 if (replicaInfo != null && 
   block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
  return replicaInfo;
 }
 return null;
}
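
A usage sketch (the block pool ID, the replicaMap instance, and all values are hypothetical) showing why the generation-stamp comparison matters: after an append or recovery bumps the stamp, a lookup holding the stale stamp must miss even though the block ID still matches:

// Replica stored under genstamp 1002; a caller still holding genstamp 1001:
Block stale = new Block(1073741825L, 134217728L, 1001L);
ReplicaInfo r = replicaMap.get("BP-1234-10.0.0.1-1400000000000", stale);
// r == null: the ID matched, but 1001 != 1002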

Code example origin: org.apache.hadoop/hadoop-hdfs

public void compileReport(LinkedList<ScanInfo> report,
  ReportCompiler reportCompiler)
    throws IOException, InterruptedException {
 /* refresh the aliasMap and return the list of blocks found.
  * the assumption here is that the block ids in the external
  * block map, after the refresh, are consistent with those
  * from before the refresh, i.e., for blocks which did not change,
  * the ids remain the same.
  */
 aliasMap.refresh();
 BlockAliasMap.Reader<FileRegion> reader = aliasMap.getReader(null, bpid);
 for (FileRegion region : reader) {
  reportCompiler.throttle();
  report.add(new ScanInfo(region.getBlock().getBlockId(),
    providedVolume, region,
    region.getProvidedStorageLocation().getLength()));
 }
}

Code example origin: org.apache.hadoop/hadoop-hdfs

public FinalizedProvidedReplica(FileRegion fileRegion, FsVolumeSpi volume,
  Configuration conf, FileSystem remoteFS) {
 super(fileRegion.getBlock().getBlockId(),
   fileRegion.getProvidedStorageLocation().getPath().toUri(),
   fileRegion.getProvidedStorageLocation().getOffset(),
   fileRegion.getBlock().getNumBytes(),
   fileRegion.getBlock().getGenerationStamp(),
   new RawPathHandle(ByteBuffer
     .wrap(fileRegion.getProvidedStorageLocation().getNonce())),
   volume, conf, remoteFS);
}

Code example origin: org.apache.hadoop/hadoop-hdfs

public DBlock getInternalBlock(StorageGroup storage) {
 int idxInLocs = locations.indexOf(storage);
 if (idxInLocs == -1) {
  return null;
 }
 byte idxInGroup = indices[idxInLocs];
 long blkId = getBlock().getBlockId() + idxInGroup;
 long numBytes = getInternalBlockLength(getNumBytes(), cellSize,
   dataBlockNum, idxInGroup);
 Block blk = new Block(getBlock());
 blk.setBlockId(blkId);
 blk.setNumBytes(numBytes);
 DBlock dblk = new DBlock(blk);
 dblk.addLocation(storage);
 return dblk;
}
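
This is the inverse of the getBlockIndex example near the top of this page: where that method extracts the index from an internal block's ID, this one reconstructs an internal ID by adding the index to the group's base ID. With hypothetical values:

long groupBaseId = -4611686018427387904L;   // EC block-group base ID (low 4 bits clear)
byte idxInGroup = 3;
long internalId = groupBaseId + idxInGroup; // ID of the fourth internal block
// (internalId & 0xF) == 3 recovers the index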

Code example origin: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
 skipTo((getCurrentValue() & ~BLOCK_GROUP_INDEX_MASK) + MAX_BLOCKS_IN_GROUP);
 // Make sure there's no conflict with existing random block IDs
 final Block b = new Block(getCurrentValue());
 while (hasValidBlockInRange(b)) {
  skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
  b.setBlockId(getCurrentValue());
 }
 if (b.getBlockId() >= 0) {
  throw new IllegalStateException("All negative block group IDs are used, "
    + "growing into positive IDs, "
    + "which might conflict with non-erasure coded blocks.");
 }
 return getCurrentValue();
}
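
The skipTo arithmetic aligns the generator to block-group boundaries: clearing the group-index bits of the current value and adding MAX_BLOCKS_IN_GROUP yields the base ID of the next group. A worked example, assuming the mask 0xF and group size 16 of current Hadoop releases:

long current = -9223372036854775800L;   // hypothetical: 8 IDs into a group
long nextBase = (current & ~0xFL) + 16; // (current & ~BLOCK_GROUP_INDEX_MASK) + MAX_BLOCKS_IN_GROUP
// nextBase == -9223372036854775792L, the next group boundary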
