org.apache.hadoop.hdfs.server.datanode.DataNode.checkBlockToken()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(5.4k)|赞(0)|评价(0)|浏览(140)

本文整理了Java中org.apache.hadoop.hdfs.server.datanode.DataNode.checkBlockToken()方法的一些代码示例,展示了DataNode.checkBlockToken()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。DataNode.checkBlockToken()方法的具体详情如下:
包路径:org.apache.hadoop.hdfs.server.datanode.DataNode
类名称:DataNode
方法名:checkBlockToken

DataNode.checkBlockToken介绍

[英]Check block access token for the given access mode
[中]检查给定访问模式的块访问令牌

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

/**
 * Returns the on-disk block/meta file paths for a local short-circuit read.
 * The caller must be allowed local path access and must present a valid
 * READ block token; storage must already be initialized.
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  final BlockLocalPathInfo pathInfo = data.getBlockLocalPathInfo(block);
  if (pathInfo == null) {
    LOG.trace("getBlockLocalPathInfo for block={} returning null", block);
  } else {
    LOG.trace("getBlockLocalPathInfo successful block={} blockfile {} metafile {}",
        block, pathInfo.getBlockPath(), pathInfo.getMetaPath());
  }
  metrics.incrBlocksGetLocalPathInfo();
  return pathInfo;
}

代码示例来源:origin: io.fabric8/fabric-hadoop

/** {@inheritDoc} */
public LocatedBlock recoverBlock(Block block, boolean keepLength, DatanodeInfo[] targets
  ) throws IOException {
 logRecoverBlock("Client", block, targets);
 checkBlockToken(block, BlockTokenSecretManager.AccessMode.WRITE);
 return recoverBlock(block, keepLength, targets, false);
}

代码示例来源:origin: io.fabric8/fabric-hadoop

/** {@inheritDoc} */
public Block getBlockInfo(Block block) throws IOException {
 checkBlockToken(block, BlockTokenSecretManager.AccessMode.READ);
 Block stored = data.getStoredBlock(block.getBlockId());
 return stored;
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
  String bpId, long[] blockIds,
  List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
  UnsupportedOperationException {
 if (!getHdfsBlockLocationsEnabled) {
  throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
    + " is not enabled in datanode config");
 }
 if (blockIds.length != tokens.size()) {
  throw new IOException("Differing number of blocks and tokens");
 }
 // Check access for each block
 for (int i = 0; i < blockIds.length; i++) {
  checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
    tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
 }
 DataNodeFaultInjector.get().getHdfsBlocksMetadata();
 return data.getHdfsBlocksMetadata(bpId, blockIds);
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
  String bpId, long[] blockIds,
  List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
  UnsupportedOperationException {
 if (!getHdfsBlockLocationsEnabled) {
  throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
    + " is not enabled in datanode config");
 }
 if (blockIds.length != tokens.size()) {
  throw new IOException("Differing number of blocks and tokens");
 }
 // Check access for each block
 for (int i = 0; i < blockIds.length; i++) {
  checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
    tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
 }
 DataNodeFaultInjector.get().getHdfsBlocksMetadata();
 return data.getHdfsBlocksMetadata(bpId, blockIds);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
  Token<BlockTokenIdentifier> token) throws IOException {
 checkBlockLocalPathAccess();
 checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
 Preconditions.checkNotNull(data, "Storage not yet initialized");
 BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
 if (LOG.isDebugEnabled()) {
  if (info != null) {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo successful block=" + block
      + " blockfile " + info.getBlockPath() + " metafile "
      + info.getMetaPath());
   }
  } else {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo for block=" + block
      + " returning null");
   }
  }
 }
 metrics.incrBlocksGetLocalPathInfo();
 return info;
}

代码示例来源:origin: io.fabric8/fabric-hadoop

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(Block block,
  Token<BlockTokenIdentifier> token) throws IOException {
 checkBlockLocalPathAccess();
 checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
 BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
 if (LOG.isDebugEnabled()) {
  if (info != null) {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo successful block=" + block
      + " blockfile " + info.getBlockPath() + " metafile "
      + info.getMetaPath());
   }
  } else {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo for block=" + block
      + " returning null");
   }
  }
 }
 myMetrics.incrBlocksGetLocalPathInfo();
 return info;
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
  Token<BlockTokenIdentifier> token) throws IOException {
 checkBlockLocalPathAccess();
 checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
 Preconditions.checkNotNull(data, "Storage not yet initialized");
 BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
 if (LOG.isDebugEnabled()) {
  if (info != null) {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo successful block=" + block
      + " blockfile " + info.getBlockPath() + " metafile "
      + info.getMetaPath());
   }
  } else {
   if (LOG.isTraceEnabled()) {
    LOG.trace("getBlockLocalPathInfo for block=" + block
      + " returning null");
   }
  }
 }
 metrics.incrBlocksGetLocalPathInfo();
 return info;
}

相关文章

DataNode类方法