This article collects code examples of the Java method org.apache.hadoop.hdfs.protocol.Block.getNumBytes() and shows how Block.getNumBytes() is used in practice. The examples are taken from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should be useful as references. The details of Block.getNumBytes() are as follows:
Package: org.apache.hadoop.hdfs.protocol
Class: Block
Method: getNumBytes
Description: none provided by the source; as the examples below show, it returns the block's length in bytes.
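Since the source gives no description, here is a minimal, hypothetical usage sketch (not taken from the projects below) that builds a Block with the public three-argument constructor also used by readCompactBlockArray() further down, and reads the length back with getNumBytes():
import org.apache.hadoop.hdfs.protocol.Block;

public class BlockSizeDemo {
  public static void main(String[] args) {
    // Hypothetical values: block id 1, length 128 MB, generation stamp 1001.
    Block block = new Block(1L, 128L * 1024 * 1024, 1001L);
    // getNumBytes() returns the block length in bytes.
    System.out.println("block " + block.getBlockId()
        + " has " + block.getNumBytes() + " bytes");
  }
}
The printed value is simply whatever length the Block object carries; inside HDFS that field is set from the replica's actual on-disk length, as the snippets below illustrate.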
Code example source: org.apache.hadoop/hadoop-hdfs
public long getNumBytes() {
  return block.getNumBytes();
}
Code example source: org.apache.hadoop/hadoop-hdfs
private static long getSize(FSEditLogOp.AddCloseOp acOp) {
  long size = 0;
  for (Block b : acOp.getBlocks()) {
    size += b.getNumBytes();
  }
  return size;
}
Code example source: org.apache.hadoop/hadoop-hdfs
public static void blockToXml(ContentHandler contentHandler, Block block)
    throws SAXException {
  contentHandler.startElement("", "", "BLOCK", new AttributesImpl());
  XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
      Long.toString(block.getBlockId()));
  XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
      Long.toString(block.getNumBytes()));
  XMLUtils.addSaxString(contentHandler, "GENSTAMP",
      Long.toString(block.getGenerationStamp()));
  contentHandler.endElement("", "", "BLOCK");
}
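For context, a sketch of how a SAX helper like the one above could be driven on its own. This is an assumption-laden example: it assumes the method is the public static blockToXml() in FSEditLogOp (which is where this snippet appears to come from), and it uses a standard javax.xml TransformerHandler as the ContentHandler so the emitted element can be captured as a string:
import java.io.StringWriter;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.sax.SAXTransformerFactory;
import javax.xml.transform.sax.TransformerHandler;
import javax.xml.transform.stream.StreamResult;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

public class BlockToXmlDemo {
  public static void main(String[] args) throws Exception {
    // A TransformerHandler implements ContentHandler and serializes the SAX
    // events produced by blockToXml() into the StringWriter below.
    SAXTransformerFactory factory =
        (SAXTransformerFactory) SAXTransformerFactory.newInstance();
    TransformerHandler handler = factory.newTransformerHandler();
    handler.getTransformer().setOutputProperty(OutputKeys.INDENT, "yes");
    StringWriter out = new StringWriter();
    handler.setResult(new StreamResult(out));

    handler.startDocument();
    // Hypothetical block: id 1, 128 MB, generation stamp 1001.
    FSEditLogOp.blockToXml(handler, new Block(1L, 128L * 1024 * 1024, 1001L));
    handler.endDocument();

    System.out.println(out); // <BLOCK><BLOCK_ID>1</BLOCK_ID>...</BLOCK>
  }
}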
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 */
LocalReplica(Block block, FsVolumeSpi vol, File dir) {
  this(block.getBlockId(), block.getNumBytes(),
      block.getGenerationStamp(), vol, dir);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
LocalReplicaInPipeline(Block block,
    FsVolumeSpi vol, File dir, Thread writer) {
  this(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor.
 * @param block a block
 * @param vol volume where replica is located
 */
ReplicaInfo(Block block, FsVolumeSpi vol) {
  this(vol, block.getBlockId(), block.getNumBytes(),
      block.getGenerationStamp());
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Temporary files. They get moved to the finalized block directory when
 * the block is finalized.
 */
File createTmpFile(String bpid, Block b) throws IOException {
  checkReference();
  reserveSpaceForReplica(b.getNumBytes());
  try {
    return getBlockPoolSlice(bpid).createTmpFile(b);
  } catch (IOException exception) {
    releaseReservedSpace(b.getNumBytes());
    throw exception;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * RBW files. They get moved to the finalized block directory when
 * the block is finalized.
 */
File createRbwFile(String bpid, Block b) throws IOException {
  checkReference();
  reserveSpaceForReplica(b.getNumBytes());
  try {
    return getBlockPoolSlice(bpid).createRbwFile(b);
  } catch (IOException exception) {
    releaseReservedSpace(b.getNumBytes());
    throw exception;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
@Override
public String toString() {
  final Block b = reportedBlock != null ? reportedBlock.getBlock() : null;
  String bStr = b != null ? (b + " with size=" + b.getNumBytes() + " ")
      : " ";
  return bStr + "from " + source.getDisplayName() + " to " + target
      .getDisplayName() + " through " + (proxySource != null ? proxySource
      .datanode : "");
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
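To make the delta-encoding described above concrete, here is a hedged round-trip sketch. It assumes the two static methods above live in a class you can call (in the Hadoop source tree they appear in FSImageSerialization); the byte streams are plain java.io wrappers, and the logVersion value is a placeholder since the reader shown above does not use it:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;

public class CompactBlockArrayDemo {
  public static void main(String[] args) throws Exception {
    // Two full 128 MB blocks plus a shorter last block, all with the same
    // generation stamp, so most of the size/genstamp deltas written by
    // writeCompactBlockArray() are zero and encode to a single byte each.
    Block[] blocks = {
        new Block(1L, 128L * 1024 * 1024, 1001L),
        new Block(2L, 128L * 1024 * 1024, 1001L),
        new Block(3L, 42L * 1024 * 1024, 1001L)
    };

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      FSImageSerialization.writeCompactBlockArray(blocks, out);
    }

    try (DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
      // -64 is only a placeholder edit-log version for this sketch.
      Block[] roundTrip = FSImageSerialization.readCompactBlockArray(in, -64);
      for (Block b : roundTrip) {
        System.out.println(b.getBlockId() + " -> " + b.getNumBytes() + " bytes");
      }
    }
  }
}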
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Commit a block of a file
 *
 * @param block block to be committed
 * @param commitBlock - contains client reported block length and generation
 * @return true if the block is changed to committed state.
 * @throws IOException if the block does not have at least a minimal number
 * of replicas reported from data-nodes.
 */
private boolean commitBlock(final BlockInfo block,
    final Block commitBlock) throws IOException {
  if (block.getBlockUCState() == BlockUCState.COMMITTED)
    return false;
  assert block.getNumBytes() <= commitBlock.getNumBytes() :
      "commitBlock length is less than the stored one "
      + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
  if (block.getGenerationStamp() != commitBlock.getGenerationStamp()) {
    throw new IOException("Commit block with mismatching GS. NN has " +
        block + ", client submits " + commitBlock);
  }
  List<ReplicaUnderConstruction> staleReplicas =
      block.commitBlock(commitBlock);
  removeStaleReplicas(staleReplicas, block);
  return true;
}
Code example source: org.apache.hadoop/hadoop-hdfs
File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  fileIoProvider.mkdirsWithExistsCheck(volume, blockDir);
  File blockFile = FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  if (dfsUsage instanceof CachingGetSpaceUsed) {
    ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
        b.getNumBytes() + metaFile.length());
  }
  return blockFile;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Commit block's length and generation stamp as reported by the client.
 * Set block state to {@link BlockUCState#COMMITTED}.
 * @param block - contains client reported block length and generation
 * @return staleReplica's List.
 * @throws IOException if block ids are inconsistent.
 */
List<ReplicaUnderConstruction> commitBlock(Block block) throws IOException {
  if (getBlockId() != block.getBlockId()) {
    throw new IOException("Trying to commit inconsistent block: id = "
        + block.getBlockId() + ", expected id = " + getBlockId());
  }
  Preconditions.checkState(!isComplete());
  uc.commit();
  this.setNumBytes(block.getNumBytes());
  // Sort out invalid replicas.
  return setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
Code example source: org.apache.hadoop/hadoop-hdfs
public FinalizedProvidedReplica(FileRegion fileRegion, FsVolumeSpi volume,
    Configuration conf, FileSystem remoteFS) {
  super(fileRegion.getBlock().getBlockId(),
      fileRegion.getProvidedStorageLocation().getPath().toUri(),
      fileRegion.getProvidedStorageLocation().getOffset(),
      fileRegion.getBlock().getNumBytes(),
      fileRegion.getBlock().getGenerationStamp(),
      new RawPathHandle(ByteBuffer
          .wrap(fileRegion.getProvidedStorageLocation().getNonce())),
      volume, conf, remoteFS);
}
Code example source: org.apache.hadoop/hadoop-hdfs
public ReplicaInPipeline createTemporary(ExtendedBlock b) throws IOException {
  // create a temporary file to hold block in the designated volume
  File f = createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
  LocalReplicaInPipeline newReplicaInfo =
      new ReplicaBuilder(ReplicaState.TEMPORARY)
          .setBlockId(b.getBlockId())
          .setGenerationStamp(b.getGenerationStamp())
          .setDirectoryToUse(f.getParentFile())
          .setBytesToReserve(b.getLocalBlock().getNumBytes())
          .setFsVolume(this)
          .buildLocalReplicaInPipeline();
  return newReplicaInfo;
}
Code example source: org.apache.hadoop/hadoop-hdfs
@Override
public void run() {
  final long blockLength = replicaToDelete.getBlockDataLength();
  final long metaLength = replicaToDelete.getMetadataLength();
  boolean result;
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + replicaToDelete.getBlockURI() + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
    volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI());
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
Code example source: org.apache.hadoop/hadoop-hdfs (truncated fragment)
// Note: the source page shows only a fragment here; the enclosing method is
// cut off at both ends.
        .setPathSuffix(blockSuffix)
        .setOffset(region.getProvidedStorageLocation().getOffset())
        .setLength(region.getBlock().getNumBytes())
        .setGenerationStamp(region.getBlock().getGenerationStamp())
        .setPathHandle(pathHandle)
    // ... (lines elided by the source) ...
    bpVolumeMap.add(bpid, newReplica);
    incrNumBlocks();
    incDfsUsed(region.getBlock().getNumBytes());
  } else {
    LOG.warn("A block with id " + newReplica.getBlockId()
Code example source: org.apache.hadoop/hadoop-hdfs (truncated fragment)
// Note: the source page shows only a fragment here; the snippet is cut off
// at both ends.
      file.computeFileSize() - newLength, truncateBlock);
  assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) &&
      tBlk.getNumBytes() == truncateBlock.getNumBytes() :
      "Should be the same block.";
  if (oldBlock.getBlockId() != tBlk.getBlockId()
Code example source: org.apache.hadoop/hadoop-hdfs (truncated fragment)
// Note: the source page shows only a fragment here; the snippet is cut off.
oldLastBlock.setNumBytes(pBlock.getNumBytes());
if (!oldLastBlock.isComplete()) {
  fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);