Usage of the org.apache.hadoop.fs.FileUtil.stat2Paths() method, with code examples

This article collects code examples for the Java method org.apache.hadoop.fs.FileUtil.stat2Paths(), showing how it is used in practice. The examples were extracted from selected projects on GitHub, Stack Overflow, Maven and similar sources, and should serve as useful references. Details of FileUtil.stat2Paths() are as follows:
Package path: org.apache.hadoop.fs.FileUtil
Class name: FileUtil
Method name: stat2Paths

FileUtil.stat2Paths overview

Converts an array of FileStatus objects to an array of Path objects.
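
Before the project examples, here is a minimal usage sketch. It assumes an existing Hadoop Configuration and an illustrative HDFS directory (/user/example/input is not taken from any of the projects below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class Stat2PathsExample {
 public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  Path dir = new Path("/user/example/input");  // illustrative path
  FileSystem fs = dir.getFileSystem(conf);

  // listStatus() returns FileStatus[]; stat2Paths() strips the metadata down to Path[]
  FileStatus[] stats = fs.listStatus(dir);
  for (Path p : FileUtil.stat2Paths(stats)) {
   System.out.println(p);
  }
 }
}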

Code examples

Code example source: origin: org.apache.hadoop/hadoop-common

/**
 * Convert an array of FileStatus objects to an array of Path objects.
 * If stats is null, return a single-element array containing path.
 * @param stats
 *          an array of FileStatus objects
 * @param path
 *          default path to return if stats is null
 * @return an array of paths corresponding to the input
 */
public static Path[] stat2Paths(FileStatus[] stats, Path path) {
 if (stats == null)
  return new Path[]{path};
 else
  return stat2Paths(stats);
}
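
The two-argument overload pairs naturally with FileSystem#globStatus(), which (depending on the Hadoop version and the pattern) may return null when nothing resolves. A hedged sketch, with an illustrative glob:

// Fall back to the input directory itself when the glob resolves to nothing
Path inputDir = new Path("/user/example/input");           // illustrative path
FileStatus[] matches = fs.globStatus(new Path(inputDir, "part-*"));
Path[] inputs = FileUtil.stat2Paths(matches, inputDir);    // {inputDir} when matches is null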

Code example source: origin: apache/hbase

} else {
 boolean validFileEncountered = false;
 for (Path path : FileUtil.stat2Paths(fileStatuses)) {  // for each file that match the pattern
  if (fs.isFile(path)) {  // only process files, skip for directories
   File dst = new File(parentDirStr, "." + pathPrefix + "."

Code example source: origin: apache/hbase

Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
for (Path hfile : hfiles) {
 if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")

Code example source: origin: apache/hbase

@Test
public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit()
  throws IOException {
 generateWALs(-1);
 useDifferentDFSClient();
 WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
 FileStatus [] statuses = null;
 try {
  statuses = fs.listStatus(WALDIR);
  if (statuses != null) {
   fail("Files left in log dir: " +
     Joiner.on(",").join(FileUtil.stat2Paths(statuses)));
  }
 } catch (FileNotFoundException e) {
  // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null
 }
}

Code example source: origin: thinkaurelius/faunus

public static boolean globDelete(final FileSystem fs, final String path, final boolean recursive) throws IOException {
    boolean deleted = false;
    for (final Path p : FileUtil.stat2Paths(fs.globStatus(new Path(path)))) {
      fs.delete(p, recursive);
      deleted = true;
    }
    return deleted;
  }
}
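
A possible call site for the helper above (the glob is illustrative):

// Recursively delete everything matching the pattern; returns false if nothing matched
boolean removed = globDelete(fs, "/tmp/example-output-*", true);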

Code example source: origin: iflytek/Guitar

public static List<Path> parseWordcardDir(FileSystem fs, String wordcardDir) throws IOException {
  List<Path> lstPath = Lists.newArrayList();
  FileStatus[] aStatus = fs.globStatus(new Path(wordcardDir));
  if (null == aStatus || aStatus.length <= 0) {
    return lstPath;
  }
  Path[] tmpLstPath = FileUtil.stat2Paths(aStatus);
  lstPath.addAll(Arrays.asList(tmpLstPath));
  return lstPath;
}
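
For example (the pattern is illustrative), the helper returns an empty list rather than null when the glob matches nothing:

List<Path> parts = parseWordcardDir(fs, "/data/example/2022-01-*/part-*");
for (Path p : parts) {
 // process each matched path
}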

Code example source: origin: com.moz.fiji.mapreduce/fiji-mapreduce (the same code also appears in io.prestosql.hadoop/hadoop-apache, ch.cern.hadoop/hadoop-mapreduce-client-core, com.github.jiayuhan-it/hadoop-mapreduce-client-core, and org.apache.hadoop/hadoop-mapred)

/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(Path dir,
  Configuration conf) throws IOException {
 FileSystem fs = dir.getFileSystem(conf);
 Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
 // sort names, so that hash partitioning works
 Arrays.sort(names);
 MapFile.Reader[] parts = new MapFile.Reader[names.length];
 for (int i = 0; i < names.length; i++) {
  parts[i] = new MapFile.Reader(fs, names[i].toString(), conf);
 }
 return parts;
}
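
A hedged follow-up sketch for consuming the returned readers, assuming Text keys and values (the actual key/value classes depend on the job that wrote the MapFiles, and the output path is illustrative):

MapFile.Reader[] readers = getReaders(new Path("/output/example"), conf);
Text key = new Text("row-42");  // assumed key
Text value = new Text();
// Pick a partition by hashing the key; the real MapFileOutputFormat uses the job's Partitioner
int part = (key.hashCode() & Integer.MAX_VALUE) % readers.length;
if (readers[part].get(key, value) != null) {
 System.out.println(key + " -> " + value);
}
for (MapFile.Reader reader : readers) {
 reader.close();
}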

Code example source: origin: ch.cern.hadoop/hadoop-mapreduce-client-core (the same code also appears in org.apache.hadoop/hadoop-mapred, org.jvnet.hudson.hadoop/hadoop-core, com.facebook.hadoop/hadoop-core, io.hops/hadoop-mapreduce-client-core, com.github.jiayuhan-it/hadoop-mapreduce-client-core, and io.prestosql.hadoop/hadoop-apache)

/** Open the output generated by this format. */
public static SequenceFile.Reader[] getReaders(Configuration conf, Path dir)
 throws IOException {
 FileSystem fs = dir.getFileSystem(conf);
 Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
 
 // sort names, so that hash partitioning works
 Arrays.sort(names);
 
 SequenceFile.Reader[] parts = new SequenceFile.Reader[names.length];
 for (int i = 0; i < names.length; i++) {
  parts[i] = new SequenceFile.Reader(fs, names[i], conf);
 }
 return parts;
}
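
A similar hedged sketch for iterating the SequenceFile readers, assuming Text/IntWritable records (adjust to the job's actual key/value classes; the output path is illustrative). Note that the SequenceFile.Reader(fs, path, conf) constructor used above is deprecated in recent Hadoop releases in favour of SequenceFile.Reader(Configuration, Reader.Option...):

SequenceFile.Reader[] readers = getReaders(conf, new Path("/output/example"));
Text key = new Text();                  // assumed key class
IntWritable value = new IntWritable();  // assumed value class
for (SequenceFile.Reader reader : readers) {
 while (reader.next(key, value)) {  // streams every record in file order
  System.out.println(key + "\t" + value);
 }
 reader.close();
}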

Code example source: origin: izenecloud/laser

protected Path[] getFilePaths(Configuration conf, FileSystem fs,
    Path filePath) throws IOException {
  FileStatus[] hdfsFiles = fs.listStatus(filePath);
  Path[] hdfsFilePaths = FileUtil.stat2Paths(hdfsFiles);
  List<Path> files = new ArrayList<Path>();
  for (Path hdfsFilePath : hdfsFilePaths) {
    FileStatus fileStatus = fs.getFileStatus(hdfsFilePath);
    if (!fileStatus.isDir()) {
      files.add(hdfsFilePath);
    }
  }
  return files.toArray(new Path[0]);
}
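
Note that the loop above issues an extra getFileStatus() call per path even though listStatus() already returned the metadata (and FileStatus.isDir() is deprecated). A sketch of an equivalent version that filters on the FileStatus array directly:

protected Path[] getFilePaths(Configuration conf, FileSystem fs, Path filePath) throws IOException {
 List<Path> files = new ArrayList<Path>();
 for (FileStatus status : fs.listStatus(filePath)) {
  if (status.isFile()) {  // skip directories without a second round trip
   files.add(status.getPath());
  }
 }
 return files.toArray(new Path[0]);
}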

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

private void verifyOutput(FileSystem fs, Path outDir) throws IOException {
 Path[] outputFiles = FileUtil.stat2Paths(
   fs.listStatus(outDir, new Utils.OutputFileUtils.OutputFilesFilter()));
 assertEquals(numReduces, outputFiles.length);
 InputStream is = fs.open(outputFiles[0]);
 BufferedReader reader = new BufferedReader(new InputStreamReader(is));
 String s = reader.readLine().split("\t")[1];
 assertEquals("b a",s);
 assertNull(reader.readLine());
 reader.close();
}
