Usage of org.apache.hadoop.hdfs.protocol.Block.isBlockFilename() with code examples


This article collects code examples of the org.apache.hadoop.hdfs.protocol.Block.isBlockFilename() method in Java and shows how it is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, extracted from selected projects, and should serve as useful references. Details of Block.isBlockFilename() are as follows:
Package path: org.apache.hadoop.hdfs.protocol.Block
Class name: Block
Method name: isBlockFilename

About Block.isBlockFilename

The upstream Javadoc for this method is empty. Judging from the call sites below, isBlockFilename checks whether a given file (or file name, in older branches) follows the DataNode's on-disk naming convention for block data files, blk_<blockId>, which lets directory scans distinguish block files from their .meta checksum files and from subdirectories.
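
None of the snippets below show the method body itself, but its behavior can be inferred from the call sites: it is used to filter directory listings down to block data files. A minimal sketch of an equivalent check, assuming the standard blk_<blockId> naming convention for DataNode block files (an illustration, not the actual Hadoop implementation):

import java.io.File;
import java.util.regex.Pattern;

public class BlockFilenameCheck {
  // HDFS block data files are named "blk_<blockId>"; their checksum
  // metadata files are named "blk_<blockId>_<genStamp>.meta".
  private static final Pattern BLOCK_FILE_PATTERN =
      Pattern.compile("blk_(-?\\d+)");

  // Equivalent check: does this file look like a block data file
  // (as opposed to a .meta file, a VERSION file, a subdirectory, ...)?
  public static boolean looksLikeBlockFile(File f) {
    return BLOCK_FILE_PATTERN.matcher(f.getName()).matches();
  }

  public static void main(String[] args) {
    System.out.println(looksLikeBlockFile(new File("blk_1073741825")));           // true
    System.out.println(looksLikeBlockFile(new File("blk_1073741825_1001.meta"))); // false
    System.out.println(looksLikeBlockFile(new File("VERSION")));                  // false
  }
}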

Code examples

Example source: org.apache.hadoop/hadoop-hdfs

if (!Block.isBlockFilename(file)) {
  if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
    long blockId = Block.getBlockId(file.getName());
    // ... (snippet truncated at the source)

Example source: org.apache.hadoop/hadoop-hdfs

if (!Block.isBlockFilename(file)) {
  continue;
}

Example source: org.jvnet.hudson.hadoop/hadoop-core

if (files[idx].isDirectory()) {
  numChildren++;
} else if (Block.isBlockFilename(files[idx])) {
  numBlocks++;
}

Example source: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(TreeSet<Block> blockSet) {
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }
  File blockFiles[] = dir.listFiles();
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFiles[i])) {
      long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
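
Every one of these scans pairs a block file with a generation stamp via getGenerationStampFromFile, whose body never appears in the snippets. A plausible sketch of what it does, assuming the blk_<blockId>_<genStamp>.meta naming convention (a hypothetical reconstruction, not the verbatim Hadoop code):

// Hypothetical reconstruction: locate the "blk_<id>_<genStamp>.meta" sibling
// of a block file in the same listing and parse the stamp out of its name.
static long getGenerationStampFromFile(File[] listdir, File blockFile) {
  String blockName = blockFile.getName();            // e.g. "blk_1073741825"
  for (File f : listdir) {
    String name = f.getName();
    if (name.startsWith(blockName + "_") && name.endsWith(".meta")) {
      String stamp = name.substring(blockName.length() + 1,
                                    name.length() - ".meta".length());
      return Long.parseLong(stamp);
    }
  }
  return 0; // no meta file found; real HDFS falls back to a sentinel stamp here
}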

Example source: org.jvnet.hudson.hadoop/hadoop-core

void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume) {
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getVolumeMap(volumeMap, volume);
    }
  }
  File blockFiles[] = dir.listFiles();
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFiles[i])) {
      long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
      volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp),
          new DatanodeBlockInfo(volume, blockFiles[i]));
    }
  }
}

Example source: com.facebook.hadoop/hadoop-core

void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) {
  if (rbwDir == null) {
    return;
  }
  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }
  String[] blockFileNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      if (Block.isBlockFilename(blockFileNames[i])) {
        long genStamp = FSDataset.getGenerationStampFromFile(
            blockFileNames, blockFileNames[i]);
        Block block =
            new Block(blockFiles[i], blockFiles[i].length(), genStamp);

        // add this block to the block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }
      }
    }
  }
}
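
Note that this snippet passes a file name String to isBlockFilename, while the org.apache.hadoop snippets above pass a java.io.File, so the Facebook branch evidently carried a String overload alongside (or instead of) the File one. Inferred from the call sites, the two overloads might relate like this (a hypothetical sketch, not confirmed against that branch):

// Hypothetical pair of overloads, inferred from the call sites:
// the File variant would simply delegate to the String variant.
public static boolean isBlockFilename(File f) {
  return isBlockFilename(f.getName());
}

public static boolean isBlockFilename(String name) {
  // block data files carry no extension; meta files end in ".meta"
  return name.startsWith("blk_") && !name.endsWith(".meta");
}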

Example source: ch.cern.hadoop/hadoop-hdfs

if (!Block.isBlockFilename(files[i])) {
  if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, files[i].getName())) {
    long blockId = Block.getBlockId(files[i].getName());
    // ... (snippet truncated at the source)

Example source: com.facebook.hadoop/hadoop-core

} else if (file.isDirectory()) {
  numChildren++;
} else if (Block.isBlockFilename(fileName)) {
  numBlocks++;
  if (volume != null) {
    // ... (snippet truncated at the source)

Example source: io.prestosql.hadoop/hadoop-apache

if (!Block.isBlockFilename(files[i])) {
  if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, files[i].getName())) {
    long blockId = Block.getBlockId(files[i].getName());
    // ... (snippet truncated at the source)

Example source: com.facebook.hadoop/hadoop-core

/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }
  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);

  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
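
The getFileNames helper used here (and in the other com.facebook.hadoop snippets) is also not shown; judging from its use, it just precomputes the file names once so the loop avoids repeated File.getName() calls. A trivial sketch (hypothetical, mirroring its use above):

// Hypothetical helper, mirroring its use in the snippets above.
static String[] getFileNames(File[] files) {
  String[] names = new String[files.length];
  for (int i = 0; i < files.length; i++) {
    names[i] = files[i].getName();
  }
  return names;
}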

Example source: com.facebook.hadoop/hadoop-core

/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }
  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}

Example source: org.apache.hadoop/hadoop-hdfs-test

File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
  if (isCorrupt && Block.isBlockFilename(file)) {
    new RandomAccessFile(file, "rw").setLength(fileLen - 1); // corrupt
  }
}

Example source: ch.cern.hadoop/hadoop-hdfs

if (!Block.isBlockFilename(file))
  continue;

Example source: io.prestosql.hadoop/hadoop-apache

if (!Block.isBlockFilename(file))
  continue;

Example source: ch.cern.hadoop/hadoop-hdfs

File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
  if (isCorrupt && Block.isBlockFilename(file)) {
    new RandomAccessFile(file, "rw").setLength(fileLen - 1); // corrupt
  }
}
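
The same corruption trick, truncating a block file by one byte so the replica no longer matches its recorded length, is easy to reproduce outside a Hadoop test harness with plain java.io. A self-contained sketch (the path is made up for illustration):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class TruncateBlockFile {
  public static void main(String[] args) throws IOException {
    // Hypothetical path to a replica in a replica-being-written (rbw) directory.
    File blockFile = new File("/tmp/rbw/blk_1073741825");
    try (RandomAccessFile raf = new RandomAccessFile(blockFile, "rw")) {
      // Chop off the last byte, the same trick the tests above use
      // to simulate on-disk corruption of the replica.
      raf.setLength(raf.length() - 1);
    }
  }
}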
