org.apache.hadoop.hdfs.protocol.Block.toString(): usage and code examples


This article collects Java code examples of the org.apache.hadoop.hdfs.protocol.Block.toString() method and shows how it is used in practice. The examples are drawn from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a practical reference. Details of Block.toString() are as follows:
Package: org.apache.hadoop.hdfs.protocol
Class: Block
Method: toString

About Block.toString

No description is available.
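As a rough orientation before the extracted examples, the minimal sketch below shows the typical call pattern. The three-argument Block constructor and the exact rendered form (commonly something like blk_<blockId>_<generationStamp>) are assumptions based on common Hadoop versions, not guaranteed by this article:

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockToStringDemo {
 public static void main(String[] args) {
  // Constructor signature assumed here: Block(long blockId, long numBytes, long generationStamp).
  Block block = new Block(1073741825L, 134217728L, 1001L);

  // Instance form; typically renders as something like "blk_1073741825_1001".
  System.out.println(block.toString());

  // Static overload, as used by several of the examples below.
  System.out.println(Block.toString(block));
 }
}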

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
 public String toString() {
  return block.toString() + ", status: " + status +
   ", delHint: " + delHints;
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
 public String toString() {
  return super.toString() + "[numBytes=" + this.getNumBytes() +
    ",originalReplicaState=" + this.originalState.name() + "]";
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

private void removeStaleReplicas(List<ReplicaUnderConstruction> staleReplicas,
  BlockInfo block) {
 for (ReplicaUnderConstruction r : staleReplicas) {
  removeStoredBlock(block,
    r.getExpectedStorageLocation().getDatanodeDescriptor());
  NameNode.blockStateChangeLog
    .debug("BLOCK* Removing stale replica {}" + " of {}", r,
      Block.toString(r));
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockURI()     = " + getBlockURI();
}

Code example source: org.apache.hadoop/hadoop-hdfs

errors.add("Failed to delete replica " + invalidBlks[i]
  + ": GenerationStamp not matched, existing replica is "
  + Block.toString(infoByBlockId));
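Note that this fragment (like the removeStaleReplicas example above) calls the static overload Block.toString(Block) rather than the instance method; the hadoop-hdfs-client example further down shows the instance toString() simply delegating to that static form via toString(this).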

Code example source: org.apache.hadoop/hadoop-hdfs

DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
State state = (storageInfo == null) ? null : storageInfo.getState();
out.println("Block=" + block.toString()
  + "\tSize=" + block.getNumBytes()
  + "\tNode=" + node.getName() + "\tStorageID=" + storageId

Code example source: ch.cern.hadoop/hadoop-hdfs

@Override
 public String toString() {
  return block.toString() + ", status: " + status +
   ", delHint: " + delHints;
 }
}

Code example source: io.prestosql.hadoop/hadoop-apache

@Override
 public String toString() {
  return block.toString() + ", status: " + status +
   ", delHint: " + delHints;
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs-client

/**
 */
@Override
public String toString() {
 return toString(this);
}

Code example source: com.facebook.hadoop/hadoop-core

public String toString(){
  return block.toString();
 }
}

Code example source: com.facebook.hadoop/hadoop-core

public String toString(){
  return block.toString() + ", delHint: " + delHints;
 }
}

Code example source: io.prestosql.hadoop/hadoop-apache

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockFile()    = " + getBlockFile();
}

Code example source: ch.cern.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockFile()    = " + getBlockFile();
}

Code example source: com.facebook.hadoop/hadoop-core

invalidBlks[i].toString(), namespaceId);

Code example source: ch.cern.hadoop/hadoop-hdfs

static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
  DatanodeID datanodeid, Configuration conf, int socketTimeout,
  boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
 final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
 InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
 if (LOG.isDebugEnabled()) {
  LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
 }
 
 // Since we're creating a new UserGroupInformation here, we know that no
 // future RPC proxies will be able to re-use the same connection. And
 // usages of this proxy tend to be one-off calls.
 //
 // This is a temporary fix: callers should really achieve this by using
 // RPC.stopProxy() on the resulting object, but this is currently not
 // working in trunk. See the discussion on HDFS-1965.
 Configuration confWithNoIpcIdle = new Configuration(conf);
 confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
   .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
 UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
 ticket.addToken(locatedBlock.getBlockToken());
 return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
   NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}

Code example source: org.apache.hadoop/hadoop-hdfs-client

static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
  DatanodeID datanodeid, Configuration conf, int socketTimeout,
  boolean connectToDnViaHostname, LocatedBlock locatedBlock)
  throws IOException {
 final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
 InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
 LOG.debug("Connecting to datanode {} addr={}", dnAddr, addr);
 // Since we're creating a new UserGroupInformation here, we know that no
 // future RPC proxies will be able to re-use the same connection. And
 // usages of this proxy tend to be one-off calls.
 //
 // This is a temporary fix: callers should really achieve this by using
 // RPC.stopProxy() on the resulting object, but this is currently not
 // working in trunk. See the discussion on HDFS-1965.
 Configuration confWithNoIpcIdle = new Configuration(conf);
 confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
   .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
 UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
 ticket.addToken(locatedBlock.getBlockToken());
 return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
   NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}

Code example source: com.facebook.hadoop/hadoop-core

Block block = lBlk.getBlock();
boolean isCorrupt = lBlk.isCorrupt();
String blkName = block.toString();
DatanodeInfo[] locs = lBlk.getLocations();
res.totalReplicas += locs.length;
if (locs.length == 0) {
 report.append(" MISSING!");
 res.addMissing(block.toString(), block.getNumBytes());
 missing++;
 missize += block.getNumBytes();

Code example source: org.jvnet.hudson.hadoop/hadoop-core

Block block = lBlk.getBlock();
boolean isCorrupt = lBlk.isCorrupt();
String blkName = block.toString();
DatanodeInfo[] locs = lBlk.getLocations();
res.totalReplicas += locs.length;
if (locs.length == 0) {
 report.append(" MISSING!");
 res.addMissing(block.toString(), block.getNumBytes());
 missing++;
 missize += block.getNumBytes();

Code example source: io.prestosql.hadoop/hadoop-apache

static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
  DatanodeID datanodeid, Configuration conf, int socketTimeout,
  boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
 final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
 InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
 if (LOG.isDebugEnabled()) {
  LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
 }
 
 // Since we're creating a new UserGroupInformation here, we know that no
 // future RPC proxies will be able to re-use the same connection. And
 // usages of this proxy tend to be one-off calls.
 //
 // This is a temporary fix: callers should really achieve this by using
 // RPC.stopProxy() on the resulting object, but this is currently not
 // working in trunk. See the discussion on HDFS-1965.
 Configuration confWithNoIpcIdle = new Configuration(conf);
 confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
   .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
 UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
 ticket.addToken(locatedBlock.getBlockToken());
 return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
   NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

@Test
public void testBlockTokenRpc() throws Exception {
 BlockTokenSecretManager sm = new BlockTokenSecretManager(true,
   blockKeyUpdateInterval, blockTokenLifetime);
 Token<BlockTokenIdentifier> token = sm.generateToken(block3,
   EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
 final Server server = createMockDatanode(sm, token);
 server.start();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 final UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(block3.toString());
 ticket.addToken(token);
 ClientDatanodeProtocol proxy = null;
 try {
  proxy = (ClientDatanodeProtocol)RPC.getProxy(
    ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, addr,
    ticket, conf, NetUtils.getDefaultSocketFactory(conf));
  assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}
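A pattern worth noting across the createClientDatanodeProtocolProxy variants and the testBlockTokenRpc test above: the block's string form is used as the user name passed to UserGroupInformation.createRemoteUser(...), and the block access token is then attached to that UGI before the datanode RPC proxy is created.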
