Usage and code examples of the org.apache.hadoop.fs.FileUtil.canRead() method


This article collects Java code examples for the org.apache.hadoop.fs.FileUtil.canRead() method and shows how FileUtil.canRead() is used in practice. The examples are extracted from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as useful references. Details of FileUtil.canRead() are as follows:
Package path: org.apache.hadoop.fs.FileUtil
Class: FileUtil
Method: canRead

About FileUtil.canRead

Platform independent implementation for File#canRead()
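
As a quick illustration (a minimal sketch, not taken from any of the projects below; the file path is hypothetical), the method is a drop-in replacement for File#canRead() that behaves consistently across platforms:

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class CanReadDemo {
 public static void main(String[] args) {
  // Hypothetical path, used only for illustration.
  File f = new File("/tmp/example.txt");
  // Platform-independent read-access check; File#canRead() can be
  // unreliable on some platforms, which is why this wrapper exists.
  if (f.exists() && FileUtil.canRead(f)) {
   System.out.println(f + " is readable");
  } else {
   System.out.println(f + " is missing or not readable");
  }
 }
}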

Code examples

Example source: org.apache.hadoop/hadoop-common

/**
 * A wrapper for {@link File#list()}. This java.io API returns null
 * when a dir is not a directory or for any I/O error. Instead of
 * requiring a null check everywhere File#list() is used, this utility
 * method throws an IOException in those cases.
 * @param dir directory for which listing should be performed
 * @return list of file names or empty string list
 * @exception AccessDeniedException for unreadable directory
 * @exception IOException for invalid directory or for bad disk
 */
public static String[] list(File dir) throws IOException {
 if (!canRead(dir)) {
  throw new AccessDeniedException(dir.toString(), null,
    FSExceptionMessages.PERMISSION_DENIED);
 }
 String[] fileNames = dir.list();
 if (fileNames == null) {
  throw new IOException("Invalid directory or I/O error occurred for dir: "
       + dir.toString());
 }
 return fileNames;
}
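
For context, a minimal usage sketch of this wrapper (the directory path is hypothetical): failure modes surface as exceptions rather than as a null return.

import java.io.File;
import java.io.IOException;
import java.nio.file.AccessDeniedException;
import org.apache.hadoop.fs.FileUtil;

public class ListDemo {
 public static void main(String[] args) {
  try {
   // Hypothetical directory; an unreadable or invalid dir throws
   // instead of returning null.
   String[] names = FileUtil.list(new File("/tmp/data"));
   for (String name : names) {
    System.out.println(name);
   }
  } catch (AccessDeniedException e) {
   System.err.println("Directory not readable: " + e.getFile());
  } catch (IOException e) {
   System.err.println("Listing failed: " + e.getMessage());
  }
 }
}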

Example source: org.apache.hadoop/hadoop-common

/**
 * Checks that the current running process can read, write, and execute the
 * given directory by using methods of the File object.
 * 
 * @param dir File to check
 * @throws DiskErrorException if dir is not readable, not writable, or not
 *   executable
 */
private static void checkAccessByFileMethods(File dir)
  throws DiskErrorException {
 if (!dir.isDirectory()) {
  throw new DiskErrorException("Not a directory: "
    + dir.toString());
 }
 if (!FileUtil.canRead(dir)) {
  throw new DiskErrorException("Directory is not readable: "
                 + dir.toString());
 }
 if (!FileUtil.canWrite(dir)) {
  throw new DiskErrorException("Directory is not writable: "
                 + dir.toString());
 }
 if (!FileUtil.canExecute(dir)) {
  throw new DiskErrorException("Directory is not executable: "
                 + dir.toString());
 }
}
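
This method is private to Hadoop's DiskChecker; a standalone equivalent built only on the public FileUtil helpers could look like the following (a sketch; DirAccess and checkDirAccess are hypothetical names):

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class DirAccess {
 // Hypothetical helper mirroring the private DiskChecker check above.
 static void checkDirAccess(File dir) throws IOException {
  if (!dir.isDirectory()) {
   throw new IOException("Not a directory: " + dir);
  }
  if (!FileUtil.canRead(dir) || !FileUtil.canWrite(dir)
    || !FileUtil.canExecute(dir)) {
   throw new IOException("Insufficient permissions on: " + dir);
  }
 }
}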

Example source: org.apache.hadoop/hadoop-hdfs

synchronized boolean moveTmpSegmentToCurrent(File tmpFile, File finalFile,
  long endTxId) throws IOException {
 final boolean success;
 if (endTxId <= committedTxnId.get()) {
  if (!finalFile.getParentFile().exists()) {
   LOG.error(finalFile.getParentFile() + " doesn't exist. Aborting tmp " +
     "segment move to current directory ; journal id: " + journalId);
   return false;
  }
  Files.move(tmpFile.toPath(), finalFile.toPath(),
    StandardCopyOption.ATOMIC_MOVE);
  if (finalFile.exists() && FileUtil.canRead(finalFile)) {
   success = true;
  } else {
   success = false;
   LOG.warn("Unable to move edits file from " + tmpFile + " to " +
     finalFile + " ; journal id: " + journalId);
  }
 } else {
  success = false;
  LOG.error("The endTxId of the temporary file is not less than the " +
    "last committed transaction id. Aborting move to final file" +
    finalFile + " ; journal id: " + journalId);
 }
 return success;
}
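
Note the pattern above: after Files.move with ATOMIC_MOVE, the destination is re-verified with exists() and FileUtil.canRead() rather than trusted blindly. A standalone sketch of that verify-after-move step (paths hypothetical):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import org.apache.hadoop.fs.FileUtil;

public class AtomicMoveDemo {
 public static void main(String[] args) throws IOException {
  File tmp = new File("/tmp/segment.tmp");   // hypothetical source
  File dst = new File("/tmp/segment.final"); // hypothetical destination
  Files.move(tmp.toPath(), dst.toPath(), StandardCopyOption.ATOMIC_MOVE);
  // Verify the destination actually landed and is readable.
  boolean ok = dst.exists() && FileUtil.canRead(dst);
  System.out.println("move " + (ok ? "succeeded" : "failed verification"));
 }
}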

Example source: org.apache.hadoop/hadoop-hdfs

/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return If file exists and can be read, last checkpoint time. If not, 0L.
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
 File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
 long timeStamp = 0L;
 if (timeFile.exists() && FileUtil.canRead(timeFile)) {
  DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
  try {
   timeStamp = in.readLong();
   in.close();
   in = null;
  } finally {
   IOUtils.cleanupWithLogger(LOG, in);
  }
 }
 return timeStamp;
}
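
The stream handling above uses Hadoop's legacy IOUtils cleanup idiom; on Java 7+ the same read can be expressed with try-with-resources (a sketch, not the Hadoop source; the method takes the File directly for brevity):

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class CheckpointTimeReader {
 // Same behavior as readCheckpointTime above: 0L if the file is
 // missing or unreadable, otherwise the stored long value.
 static long readCheckpointTime(File timeFile) throws IOException {
  if (!timeFile.exists() || !FileUtil.canRead(timeFile)) {
   return 0L;
  }
  try (DataInputStream in =
    new DataInputStream(new FileInputStream(timeFile))) {
   return in.readLong();
  }
 }
}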

Example source: org.apache.hadoop/hadoop-hdfs

/**
 * @return The first image file with the given txid and image type.
 */
public File getFsImageName(long txid, NameNodeFile nnf) {
 for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
   it.hasNext();) {
  StorageDirectory sd = it.next();
  File fsImage = getStorageFile(sd, nnf, txid);
  if (FileUtil.canRead(sd.getRoot()) && fsImage.exists()) {
   return fsImage;
  }
 }
 return null;
}

Example source: org.apache.hadoop/hadoop-hdfs

/**
 * @return The first image file whose txid matches the given txid and
 * whose image type is one of the given types.
 */
public File getFsImage(long txid, EnumSet<NameNodeFile> nnfs) {
 for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
   it.hasNext();) {
  StorageDirectory sd = it.next();
  for (NameNodeFile nnf : nnfs) {
   File fsImage = getStorageFile(sd, nnf, txid);
   if (FileUtil.canRead(sd.getRoot()) && fsImage.exists()) {
    return fsImage;
   }
  }
 }
 return null;
}

Example source: org.apache.hadoop/hadoop-hdfs

/**
 * Return the first readable storage file of the given name
 * across any of the 'current' directories in SDs of the
 * given type, or null if no such file exists.
 */
private File findFile(NameNodeDirType dirType, String name) {
 for (StorageDirectory sd : dirIterable(dirType)) {
  File candidate = new File(sd.getCurrentDir(), name);
  if (FileUtil.canRead(sd.getCurrentDir()) &&
    candidate.exists()) {
   return candidate;
  }
 }
 return null;
}

Example source: org.apache.hadoop/hadoop-hdfs

if (finalEditsFile.exists() && FileUtil.canRead(finalEditsFile)) {
 LOG.info("Skipping download of remote edit log " + log + " since it's" +
   " already stored locally at " + finalEditsFile);
 // ...
}

Example source: org.apache.hadoop/hadoop-hdfs

if (f.exists() && FileUtil.canRead(f)) {
 LOG.info("Skipping download of remote edit log " +
   log + " since it already is stored locally at " + f);
 // ...
}

Example source: ch.cern.hadoop/hadoop-streaming

private void validate(final List<String> values)
  throws IllegalArgumentException {
 for (String file : values) {
  File f = new File(file);
  if (!FileUtil.canRead(f)) {
   fail("File: " + f.getAbsolutePath()
    + " does not exist, or is not readable.");
  }
 }
}

Example source: ch.cern.hadoop/hadoop-hdfs

/**
 * @return a debug string that helps diagnose why a given directory
 * might have a permissions error in the context of a test case
 */
private String createPermissionsDiagnosisString(File path) {
 StringBuilder sb = new StringBuilder();
 while (path != null) { 
  sb.append("path '" + path + "': ").append("\n");
  sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
  sb.append("\tpermissions: ");
  sb.append(path.isDirectory() ? "d": "-");
  sb.append(FileUtil.canRead(path) ? "r" : "-");
  sb.append(FileUtil.canWrite(path) ? "w" : "-");
  sb.append(FileUtil.canExecute(path) ? "x" : "-");
  sb.append("\n");
  path = path.getParentFile();
 }
 return sb.toString();
}
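
For instance (illustrative output only, assuming a hypothetical /data/dir that is a readable, writable, executable directory), each entry in the returned string has the form:

path '/data/dir':
	absolute:/data/dir
	permissions: drwx

with one such entry for the path and each of its ancestors up to the filesystem root.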
