本文整理了Java中org.apache.hadoop.hdfs.protocol.Block.setGenerationStamp()
方法的一些代码示例,展示了Block.setGenerationStamp()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Block.setGenerationStamp()
方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.protocol.Block
类名称:Block
方法名:setGenerationStamp
暂无
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Pairs a replica reported as corrupt with the block the namenode has
 * stored for it, overriding the replica's generation stamp with the one
 * observed on the datanode.
 *
 * @param corrupted replica reported corrupt; mutated — its generation
 *        stamp is set to {@code gs}
 * @param stored block as recorded by the namenode
 * @param gs generation stamp reported by the datanode
 * @param reason human-readable reason for marking the replica corrupt
 * @param reasonCode machine-readable corruption reason
 */
BlockToMarkCorrupt(Block corrupted, BlockInfo stored, long gs, String reason,
CorruptReplicasMap.Reason reasonCode) {
this(corrupted, stored, reason, reasonCode);
//the corrupted block in datanode has a different generation stamp
this.corrupted.setGenerationStamp(gs);
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Allocates a fresh block: a newly issued block id, zero length, and the
 * next generation stamp. Caller must hold the namesystem write lock.
 *
 * @param blockType whether the file uses striped or contiguous layout
 * @return the newly created block
 */
Block createNewBlock(BlockType blockType) throws IOException {
  assert hasWriteLock();
  final Block newBlock = new Block(nextBlockId(blockType), 0, 0);
  // Every new block receives its own freshly incremented generation stamp.
  newBlock.setGenerationStamp(nextGenerationStamp(false));
  return newBlock;
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
/** Sets the generation stamp on the underlying wrapped {@code block}. */
public void setGenerationStamp(final long genStamp) {
block.setGenerationStamp(genStamp);
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-client
/** Delegates the generation-stamp update to the wrapped {@code block}. */
public void setGenerationStamp(final long genStamp) {
block.setGenerationStamp(genStamp);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/** Forwards the new generation stamp to the wrapped {@code block}. */
public void setGenerationStamp(final long genStamp) {
block.setGenerationStamp(genStamp);
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test
/**
 * Recovers an append to {@code b}: un-finalizes the replica if needed and
 * re-registers it in the block map under the new generation stamp.
 *
 * @throws ReplicaNotFoundException if the block is unknown to this dataset
 */
@Override
public synchronized ReplicaInPipelineInterface recoverAppend(Block b,
    long newGS, long expectedBlockLen) throws IOException {
  final BInfo info = blockMap.get(b);
  if (info == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  // A finalized replica has to be reopened before it can accept an append.
  if (info.isFinalized()) {
    info.unfinalizeBlock();
  }
  // Re-key the entry so the map reflects the updated generation stamp.
  blockMap.remove(b);
  info.theBlock.setGenerationStamp(newGS);
  blockMap.put(info.theBlock, info);
  return info;
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test
/**
 * Recovers a replica-being-written: the replica must exist and must not be
 * finalized; it is then re-keyed under the new generation stamp.
 *
 * @throws ReplicaNotFoundException if the block is unknown
 * @throws ReplicaAlreadyExistsException if the replica is already finalized
 */
@Override
public synchronized ReplicaInPipelineInterface recoverRbw(Block b,
    long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
  final BInfo info = blockMap.get(b);
  if (info == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " does not exist, and cannot be appended to.");
  }
  if (info.isFinalized()) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  // Re-insert under the updated generation stamp.
  blockMap.remove(b);
  info.theBlock.setGenerationStamp(newGS);
  blockMap.put(info.theBlock, info);
  return info;
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test
/**
 * Recovers a close: finalizes the replica when it is not yet finalized,
 * then re-registers it in the block map under the new generation stamp.
 *
 * @throws ReplicaNotFoundException if the block is unknown
 */
@Override
public void recoverClose(Block b, long newGS,
    long expectedBlockLen) throws IOException {
  final BInfo info = blockMap.get(b);
  if (info == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  if (!info.isFinalized()) {
    info.finalizeBlock(info.getNumBytes());
  }
  // Re-key the entry under the new generation stamp.
  blockMap.remove(b);
  info.theBlock.setGenerationStamp(newGS);
  blockMap.put(info.theBlock, info);
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test
/**
 * Corrupts the generation stamp of {@code block} by replacing it with a
 * random value guaranteed to differ from the current one (test helper for
 * simulating a corrupt replica).
 *
 * @param block block whose generation stamp is overwritten; must not be null
 * @throws IOException if {@code block} is null
 */
private void corruptBlockGS(final Block block)
    throws IOException {
  if (block == null) {
    throw new IOException("Block isn't suppose to be null");
  }
  final long oldGS = block.getGenerationStamp();
  long newGS;
  do {
    // rand.nextLong() can (rarely) return 0, which would leave the stamp
    // unchanged; retry instead of failing the assertion on that draw.
    newGS = oldGS - rand.nextLong();
  } while (newGS == oldGS);
  assertTrue("Old and new GS shouldn't be the same",
      block.getGenerationStamp() != newGS);
  block.setGenerationStamp(newGS);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generation stamp of " + block.getBlockName() +
        " is changed to " + block.getGenerationStamp() + " from " + oldGS);
  }
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test
/**
 * Looks up the stored metadata for the given block id.
 *
 * @return a Block carrying the stored generation stamp and length, or
 *         {@code null} when the id is unknown
 */
@Override
public Block getStoredBlock(long blkid) throws IOException {
  final Block lookup = new Block(blkid);
  final BInfo info = blockMap.get(lookup);
  if (info == null) {
    return null;
  }
  // Copy the stored metadata onto the lookup key and return it.
  lookup.setGenerationStamp(info.getGenerationStamp());
  lookup.setNumBytes(info.getNumBytes());
  return lookup;
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Allocate a block at the given pending filename.
 *
 * @param src path to the file
 * @param inodes INode representing each of the components of src.
 * <code>inodes[inodes.length-1]</code> is the INode for the file.
 * @return the block registered for {@code src}
 */
private Block allocateBlock(String src, INode[] inodes) throws IOException {
  final Block block = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0);
  // Keep drawing random ids until one is found that is not already in use.
  while (isValidBlock(block)) {
    block.setBlockId(FSNamesystem.randBlockId.nextLong());
  }
  block.setGenerationStamp(getGenerationStamp());
  return dir.addBlock(src, inodes, block);
}
代码示例来源:origin: linkedin/dynamometer
/**
 * Recovers an append: un-finalizes the replica if needed and re-registers
 * it under the new generation stamp.
 *
 * @param b block to append to
 * @param newGS generation stamp to assign to the recovered replica
 * @param expectedBlockLen expected length (not checked by this dataset)
 * @return handler for the recovered replica
 * @throws ReplicaNotFoundException if the block is unknown
 */
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  final Map<Block, BInfo> map = getBlockMap(b);
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  if (binfo.isFinalized()) {
    binfo.unfinalizeBlock();
  }
  // FIX: the map is keyed by Block, so remove(b) with an ExtendedBlock was
  // a silent no-op; remove by the local block, as recoverClose does.
  map.remove(b.getLocalBlock());
  binfo.theBlock.setGenerationStamp(newGS);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
代码示例来源:origin: linkedin/dynamometer
/**
 * Recovers a replica-being-written: the replica must exist and must not be
 * finalized; it is then re-keyed under the new generation stamp.
 *
 * @param b block under recovery
 * @param newGS generation stamp to assign
 * @param minBytesRcvd minimum bytes received (not checked by this dataset)
 * @param maxBytesRcvd maximum bytes received (not checked by this dataset)
 * @return handler for the recovered replica
 * @throws ReplicaNotFoundException if the block is unknown
 * @throws ReplicaAlreadyExistsException if the replica is already finalized
 */
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverRbw(
    ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
    throws IOException {
  final Map<Block, BInfo> map = getBlockMap(b);
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " does not exist, and cannot be appended to.");
  }
  if (binfo.isFinalized()) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  // FIX: the map is keyed by Block, so remove(b) with an ExtendedBlock was
  // a silent no-op; remove by the local block, as recoverClose does.
  map.remove(b.getLocalBlock());
  binfo.theBlock.setGenerationStamp(newGS);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
/**
 * Builds a brand-new block: unique id, zero length, and the next
 * generation stamp. Requires the namesystem write lock.
 *
 * @return the newly created block
 */
Block createNewBlock() throws IOException {
  assert hasWriteLock();
  final Block result = new Block(nextBlockId(), 0, 0);
  // Each newly created block receives a freshly incremented stamp.
  result.setGenerationStamp(nextGenerationStamp(false));
  return result;
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Creates a new zero-length block with a unique id and the next generation
 * stamp. The write lock must be held.
 *
 * @return the freshly allocated block
 */
Block createNewBlock() throws IOException {
  assert hasWriteLock();
  // Stamp is incremented for every new block allocation.
  final Block allocated = new Block(nextBlockId(), 0, 0);
  allocated.setGenerationStamp(nextGenerationStamp(false));
  return allocated;
}
代码示例来源:origin: linkedin/dynamometer
/**
 * Recovers a close: finalizes the replica when necessary, then re-keys it
 * in the block map under the new generation stamp.
 *
 * @throws ReplicaNotFoundException if the block is unknown
 */
@Override // FsDatasetSpi
public Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
    throws IOException {
  final Map<Block, BInfo> pool = getBlockMap(b);
  final BInfo info = pool.get(b.getLocalBlock());
  if (info == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  if (!info.isFinalized()) {
    info.finalizeBlock(b.getBlockPoolId(), info.getNumBytes());
  }
  // Re-insert under the updated generation stamp.
  pool.remove(b.getLocalBlock());
  info.theBlock.setGenerationStamp(newGS);
  pool.put(info.theBlock, info);
  return info;
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Populates {@code b} in place with the id, length, and generation stamp
 * packed at position {@code index} of {@code blockArray}.
 */
public static void getBlockInfo(Block b, long[] blockArray, int index){
  final long id = blockArray[index2BlockId(index)];
  final long len = blockArray[index2BlockLen(index)];
  final long stamp = blockArray[index2BlockGenStamp(index)];
  b.setBlockId(id);
  b.setNumBytes(len);
  b.setGenerationStamp(stamp);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Recovers an append: un-finalizes the replica if needed and re-registers
 * it in the per-pool map under the new generation stamp.
 *
 * @param b block to append to
 * @param newGS generation stamp to assign to the recovered replica
 * @param expectedBlockLen expected length (not checked by this dataset)
 * @return handler for the recovered replica
 * @throws ReplicaNotFoundException if the block is unknown
 */
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  if (binfo.isFinalized()) {
    binfo.unfinalizeBlock();
  }
  // FIX: the map is keyed by Block, so remove(b) with an ExtendedBlock was
  // a silent no-op; remove by the local block, as recoverClose does.
  map.remove(b.getLocalBlock());
  binfo.theBlock.setGenerationStamp(newGS);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Recovers a replica-being-written: the replica must exist and must not be
 * finalized; it is then re-keyed under the new generation stamp.
 *
 * @param b block under recovery
 * @param newGS generation stamp to assign
 * @param minBytesRcvd minimum bytes received (not checked by this dataset)
 * @param maxBytesRcvd maximum bytes received (not checked by this dataset)
 * @return handler for the recovered replica
 * @throws ReplicaNotFoundException if the block is unknown
 * @throws ReplicaAlreadyExistsException if the replica is already finalized
 */
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverRbw(
    ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
    throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " does not exist, and cannot be appended to.");
  }
  if (binfo.isFinalized()) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  // FIX: the map is keyed by Block, so remove(b) with an ExtendedBlock was
  // a silent no-op; remove by the local block, as recoverClose does.
  map.remove(b.getLocalBlock());
  binfo.theBlock.setGenerationStamp(newGS);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Recovers a close: finalizes the replica if it is not yet finalized, then
 * re-keys it in the per-pool map under the new generation stamp.
 *
 * @throws ReplicaNotFoundException if the block is unknown
 */
@Override // FsDatasetSpi
public Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
    throws IOException {
  final Map<Block, BInfo> pool = getMap(b.getBlockPoolId());
  final BInfo info = pool.get(b.getLocalBlock());
  if (info == null) {
    throw new ReplicaNotFoundException("Block " + b
        + " is not valid, and cannot be appended to.");
  }
  if (!info.isFinalized()) {
    info.finalizeBlock(b.getBlockPoolId(), info.getNumBytes());
  }
  // Re-insert under the updated generation stamp.
  pool.remove(b.getLocalBlock());
  info.theBlock.setGenerationStamp(newGS);
  pool.put(info.theBlock, info);
  return info;
}
内容来源于网络,如有侵权,请联系作者删除!