This article collects code examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getDNRegistrationForBP(), showing how DataNode.getDNRegistrationForBP() is used in practice. The examples are extracted from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the DataNode.getDNRegistrationForBP() method:
Package: org.apache.hadoop.hdfs.server.datanode
Class: DataNode
Method: getDNRegistrationForBP
Description: get BP (block pool) registration by block pool id
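Before the examples, here is a minimal, self-contained sketch of a typical call (the helper method and its parameters are illustrative assumptions, not Hadoop code; what the examples below confirm is that the method throws IOException when the datanode has not yet registered with the namenode for the given block pool):
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

public class RegistrationLookup {
  // Hypothetical helper: resolve a datanode's registration for a block's pool.
  // getDNRegistrationForBP throws IOException if the datanode has not yet
  // registered with the namenode for that block pool.
  static DatanodeRegistration registrationFor(DataNode datanode, ExtendedBlock block)
      throws IOException {
    String bpid = block.getBlockPoolId();
    return datanode.getDNRegistrationForBP(bpid);
  }
}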
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * While writing to mirrorOut, failure to write to mirror should not
 * affect this datanode unless it is caused by interruption.
 */
private void handleMirrorOutError(IOException ioe) throws IOException {
  String bpid = block.getBlockPoolId();
  LOG.info(datanode.getDNRegistrationForBP(bpid)
      + ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
  if (Thread.interrupted()) { // shut down if the thread is interrupted
    throw ioe;
  } else { // encounter an error while writing to mirror
    // continue to run even if can not write to mirror
    // notify client of the error
    // and wait for the client to shut down the pipeline
    mirrorError = true;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  // Make sure this node has registered for the block pool.
  try {
    getDNRegistrationForBP(block.getBlockPoolId());
  } catch (IOException e) {
    // if it has not registered with the NN, throw an exception back.
    throw new org.apache.hadoop.ipc.RetriableException(
        "Datanode not registered. Try again later.");
  }
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      LOG.debug("Got: {}", id);
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenIdentifier.AccessMode.READ, null, null);
    }
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
// Truncated snippet: an optimistic first lookup, then a retry loop until
// the datanode has registered for the block pool.
try {
  datanode.getDNRegistrationForBP(bpId);
  return;
} catch (IOException ioe) {
  // not yet registered; fall through to the retry loop
}
while (sw.now(TimeUnit.SECONDS) <= bpReadyTimeout) {
  try {
    datanode.getDNRegistrationForBP(bpId);
    return;
  } catch (IOException ioe) {
    // still not registered; the snippet ends here in the source
  }
}
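The fragment above is cut off in the source. A self-contained sketch of the same retry-until-registered pattern (the method name, the one-second back-off, and the final timeout exception are assumptions for illustration; StopWatch is org.apache.hadoop.util.StopWatch):
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.StopWatch;

public class BPReadyWait {
  // Hypothetical sketch: block until the datanode has registered for bpId,
  // giving up after bpReadyTimeout seconds.
  static void waitForBPRegistration(DataNode datanode, String bpId,
      long bpReadyTimeout) throws IOException {
    StopWatch sw = new StopWatch().start();
    while (sw.now(TimeUnit.SECONDS) <= bpReadyTimeout) {
      try {
        datanode.getDNRegistrationForBP(bpId);
        return; // registered
      } catch (IOException ioe) {
        // not registered yet; back off and retry (1s sleep is an assumption)
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while waiting for block pool " + bpId);
        }
      }
    }
    throw new IOException("Block pool " + bpId + " is not registered yet");
  }
}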
Code example source: org.apache.hadoop/hadoop-hdfs
if (ClientTraceLog.isInfoEnabled() && isClient) {
  long offset = 0;
  DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
      .getBlockPoolId());
  ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
      // (snippet truncated in the source)
Code example source: org.apache.hadoop/hadoop-hdfs
// (method signature truncated in the source)
    throws IOException {
  BPOfferService bpos = getBPOSForBlock(block);
  DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
  // (snippet continues in the source)
Code example source: org.apache.hadoop/hadoop-hdfs
if (mode == BlockTokenIdentifier.AccessMode.WRITE) {
  DatanodeRegistration dnR =
      datanode.getDNRegistrationForBP(blk.getBlockPoolId());
  // (snippet continues in the source)
Code example source: org.apache.hadoop/hadoop-hdfs
String msg = " Offset " + startOffset + " and length " + length
    + " don't match block " + block + " ( blockLen " + end + " )";
LOG.warn(datanode.getDNRegistrationForBP(block.getBlockPoolId()) +
    ":sendBlock() : " + msg);
throw new IOException(msg);
Code example source: org.apache.hadoop/hadoop-hdfs
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
    .getBlockPoolId());
BlockSender.ClientTraceLog.info(String.format(
    // (snippet truncated in the source)
Code example source: org.apache.hadoop/hadoop-hdfs
// (snippet truncated in the source; the call's assignment target is cut off)
datanode.getDNRegistrationForBP(block.getBlockPoolId());
final String clientTraceFmt =
    clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
        // (snippet continues in the source)
Code example source: ch.cern.hadoop/hadoop-hdfs
public static DatanodeRegistration
    getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
  return dn.getDNRegistrationForBP(bpid);
}
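This static wrapper appears to come from a test utility class (DataNodeTestUtils, judging by its signature; the class name is an assumption), letting tests fetch a datanode's registration without reaching into DataNode internals. A typical call from a MiniDFSCluster-based test might look like this (variable names assumed):
// Hypothetical usage in a MiniDFSCluster test:
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnReg =
    DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);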
Code example source: ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache (the same handleMirrorOutError example as above, repeated verbatim in these artifacts)
Code example source: ch.cern.hadoop/hadoop-hdfs
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  // Make sure this node has registered for the block pool.
  try {
    getDNRegistrationForBP(block.getBlockPoolId());
  } catch (IOException e) {
    // if it has not registered with the NN, throw an exception back.
    throw new org.apache.hadoop.ipc.RetriableException(
        "Datanode not registered. Try again later.");
  }
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Code example source: ch.cern.hadoop/hadoop-hdfs
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
  Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
  DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] locs = new DatanodeInfo[] {
      new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
      mockOtherDN };
  RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
  blocks.add(rBlock);
  return blocks;
}
Code example source: ch.cern.hadoop/hadoop-hdfs
@Before
public void startUpCluster() throws IOException {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  fs = cluster.getFileSystem();
  client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  dn0 = cluster.getDataNodes().get(0);
  poolId = cluster.getNamesystem().getBlockPoolId();
  dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
Code example source: ch.cern.hadoop/hadoop-hdfs
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
  List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
  BlockRecord blockRecord = new BlockRecord(
      new DatanodeID(dnR), spyDN,
      new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
          block.getGenerationStamp(), ReplicaState.FINALIZED));
  blocks.add(blockRecord);
  return blocks;
}
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Test writes a file and closes it.
 * Block reported is generated with an extra block.
 * Block report is forced and the check for # of pendingdeletion
 * blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
      FILE_SIZE, REPL_FACTOR, rand.nextLong());

  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Test writes a file and closes it.
 * Block reported is generated with a bad GS for a single block.
 * Block report is forced and the check for # of corrupted blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Test creates a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed test runs
 * Block report and checks that no underreplicated blocks are left
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Code example source: ch.cern.hadoop/hadoop-hdfs
/**
 * Reformatted DataNodes will replace the original UUID in the
 * {@link DatanodeManager#datanodeMap}. This tests if block
 * invalidation work on the original DataNode can be skipped.
 */
@Test(timeout=120000)
public void testDatanodeReformat() throws Exception {
  namesystem.writeLock();
  try {
    // Change the datanode UUID to emulate a reformat
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
        .getDNRegistrationForBP(poolId);
    dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
    cluster.stopDataNode(nodes[0].getXferAddr());

    Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
    bm.addToInvalidates(block, nodes[0]);
    bm.getDatanodeManager().registerDatanode(dnr);

    // Since UUID has changed, the invalidation work should be skipped
    assertEquals(0, bm.computeInvalidateWork(1));
    assertEquals(0, bm.getPendingDeletionBlocksCount());
  } finally {
    namesystem.writeUnlock();
  }
}
The content above is collected from the web; if it infringes any rights, please contact the author to have it removed.