org.apache.hadoop.fs.FileUtil.canWrite()方法的使用及代码示例

x33g5p2x  于2022-01-19 转载在 其他  
字(11.9k)|赞(0)|评价(0)|浏览(229)

本文整理了Java中org.apache.hadoop.fs.FileUtil.canWrite()方法的一些代码示例,展示了FileUtil.canWrite()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。FileUtil.canWrite()方法的具体详情如下:
包路径:org.apache.hadoop.fs.FileUtil
类名称:FileUtil
方法名:canWrite

FileUtil.canWrite介绍

[英]Platform independent implementation for File#canWrite()
[中]File#canWrite()的平台无关实现

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

File parent = f.getParentFile();
File dir = null;
while (parent != null && FileUtil.canWrite(parent) &&
  parent.toString().startsWith(device)) {
 dir = parent;

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Verifies that the process running this JVM can read, write, and execute
 * the supplied directory, using {@link File}-based permission checks.
 *
 * @param dir directory to validate
 * @throws DiskErrorException if {@code dir} is not a directory, or is not
 *   readable, writable, or executable by the current process
 */
private static void checkAccessByFileMethods(File dir)
  throws DiskErrorException {
 // Compute the path string once; every failure message includes it.
 final String path = dir.toString();
 if (!dir.isDirectory()) {
  throw new DiskErrorException("Not a directory: " + path);
 }
 if (!FileUtil.canRead(dir)) {
  throw new DiskErrorException("Directory is not readable: " + path);
 }
 if (!FileUtil.canWrite(dir)) {
  throw new DiskErrorException("Directory is not writable: " + path);
 }
 if (!FileUtil.canExecute(dir)) {
  throw new DiskErrorException("Directory is not executable: " + path);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

/**
 * See if any of removed storages is "writable" again, and can be returned
 * into service.
 */
void attemptRestoreRemovedStorage() {
 // if directory is "alive" - copy the images there...
 if(!restoreFailedStorage || removedStorageDirs.size() == 0) {
  return; //nothing to restore
 }
 /* We don't want more than one thread trying to restore at a time */
 synchronized (this.restorationLock) {
  LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
       "storage. removedStorages size = {}", removedStorageDirs.size());
  // NOTE(review): the loop removes from removedStorageDirs while iterating
  // it with a for-each. That is only safe if the list is a concurrent
  // implementation (e.g. CopyOnWriteArrayList, whose iterator is a
  // snapshot); with a plain ArrayList this would throw
  // ConcurrentModificationException — confirm the field's declared type.
  for (StorageDirectory sd : this.removedStorageDirs) {
   File root = sd.getRoot();
   LOG.info("currently disabled dir {}; type={} ;canwrite={}", root
       .getAbsolutePath(), sd.getStorageDirType(),
     FileUtil.canWrite(root));
   // A directory is restorable once it exists again and is writable.
   if (root.exists() && FileUtil.canWrite(root)) {
    LOG.info("restoring dir {}", sd.getRoot().getAbsolutePath());
    this.addStorageDir(sd); // restore
    this.removedStorageDirs.remove(sd);
   }
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

if (FileUtil.canWrite(destFile) == false) {
 throw new IOException("Destination '" + destFile
   + "' exists but is read-only");

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

return StorageState.NON_EXISTENT;
if (!FileUtil.canWrite(root)) {
 LOG.warn("Cannot access storage directory {}", rootPath);
 return StorageState.NON_EXISTENT;

代码示例来源:origin: io.hops/hadoop-yarn-server-nodemanager

/**
 * Resolves the cgroup mount point of the CPU controller from the mtab and
 * records it in {@code controllerPaths}, verifying that the prefixed
 * cgroup directory is writable.
 *
 * @throws IOException if the CPU controller cannot be found in the mtab,
 *   or its cgroup directory is not writable
 */
private void initializeControllerPaths() throws IOException {
 Map<String, List<String>> parsedMtab = parseMtab();
 // CPU controller — guard-clause form: fail fast on each missing precondition.
 String controllerPath = findControllerInMtab(CONTROLLER_CPU, parsedMtab);
 if (controllerPath == null) {
  throw new IOException("Not able to enforce cpu weights; cannot find "
    + "cgroup for cpu controller in " + getMtabFileName());
 }
 File prefixedDir = new File(controllerPath + "/" + this.cgroupPrefix);
 if (!FileUtil.canWrite(prefixedDir)) {
  throw new IOException("Not able to enforce cpu weights; cannot write "
    + "to cgroup at: " + controllerPath);
 }
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-server-nodemanager

/**
 * Locates the CPU controller's cgroup mount point via the mtab, checks the
 * prefixed cgroup directory is writable, and stores the path in
 * {@code controllerPaths}.
 *
 * @throws IOException if the CPU controller is absent from the mtab or the
 *   cgroup directory is not writable
 */
private void initializeControllerPaths() throws IOException {
 Map<String, List<String>> mounts = parseMtab();
 // CPU — invert the original nesting into fail-fast guards.
 String cpuPath = findControllerInMtab(CONTROLLER_CPU, mounts);
 if (cpuPath == null) {
  throw new IOException("Not able to enforce cpu weights; cannot find "
    + "cgroup for cpu controller in " + getMtabFileName());
 }
 File cgroupDir = new File(cpuPath + "/" + this.cgroupPrefix);
 if (!FileUtil.canWrite(cgroupDir)) {
  throw new IOException("Not able to enforce cpu weights; cannot write "
    + "to cgroup at: " + cpuPath);
 }
 controllerPaths.put(CONTROLLER_CPU, cpuPath);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * See if any of removed storages is "writable" again, and can be returned
 * into service.
 */
void attemptRestoreRemovedStorage() {
 // if directory is "alive" - copy the images there...
 if(!restoreFailedStorage || removedStorageDirs.size() == 0)
  return; //nothing to restore
 /* We don't want more than one thread trying to restore at a time */
 synchronized (this.restorationLock) {
  // NOTE(review): "storarge" is a typo in this log message in the original
  // source; left untouched here since it is runtime output.
  LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
       "storarge. removedStorages size = " + removedStorageDirs.size());
  // NOTE(review): sd is removed via removedStorageDirs.remove(sd) below
  // while an explicit iterator over the same list is live. This is only
  // safe for a concurrent list (e.g. CopyOnWriteArrayList, where the
  // iterator is a snapshot and it.remove() is unsupported); with a plain
  // ArrayList it would throw ConcurrentModificationException — confirm
  // the field's declared type before changing this.
  for(Iterator<StorageDirectory> it
     = this.removedStorageDirs.iterator(); it.hasNext();) {
   StorageDirectory sd = it.next();
   File root = sd.getRoot();
   LOG.info("currently disabled dir " + root.getAbsolutePath() +
        "; type="+sd.getStorageDirType() 
        + ";canwrite="+FileUtil.canWrite(root));
   // Restore the directory once it exists again and is writable.
   if(root.exists() && FileUtil.canWrite(root)) {
    LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
    this.addStorageDir(sd); // restore
    this.removedStorageDirs.remove(sd);
   }
  }
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-yarn-server-nodemanager

/**
 * Finds where the CPU cgroup controller is mounted (per the mtab), confirms
 * the prefixed cgroup directory can be written, and registers the path in
 * {@code controllerPaths}.
 *
 * @throws IOException when the controller mount is missing or the cgroup
 *   directory is read-only
 */
private void initializeControllerPaths() throws IOException {
 Map<String, List<String>> mtabEntries = parseMtab();
 // CPU: fail fast instead of nesting the success path.
 String mountPoint = findControllerInMtab(CONTROLLER_CPU, mtabEntries);
 if (mountPoint == null) {
  throw new IOException("Not able to enforce cpu weights; cannot find "
    + "cgroup for cpu controller in " + getMtabFileName());
 }
 File target = new File(mountPoint + "/" + this.cgroupPrefix);
 if (!FileUtil.canWrite(target)) {
  throw new IOException("Not able to enforce cpu weights; cannot write "
    + "to cgroup at: " + mountPoint);
 }
 controllerPaths.put(CONTROLLER_CPU, mountPoint);
}

代码示例来源:origin: io.hops/hadoop-yarn-server-nodemanager

if (FileUtil.canWrite(f)) {
 controllerPaths.put(CONTROLLER_CPU, cpuControllerPath);
} else {
File f = new File(devicesControllerPath + "/" + this.cgroupPrefix);
if (FileUtil.canWrite(f)) {
 controllerPaths.put(CONTROLLER_DEVICES, devicesControllerPath);
} else {

代码示例来源:origin: ch.cern.hadoop/hadoop-common

File parent = f.getParentFile();
File dir = null;
while (parent != null && FileUtil.canWrite(parent) &&
  parent.toString().startsWith(device)) {
 dir = parent;

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

File parent = f.getParentFile();
File dir = null;
while (parent != null && FileUtil.canWrite(parent) &&
  parent.toString().startsWith(device)) {
 dir = parent;

代码示例来源:origin: io.hops/hadoop-yarn-server-nodemanager

File f = new File(controllerPath + "/" + cGroupPrefix);
if (FileUtil.canWrite(f)) {
 ret.put(controller, controllerPath);
} else {

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * See if any of removed storages is "writable" again, and can be returned
 * into service.
 */
void attemptRestoreRemovedStorage() {
 // if directory is "alive" - copy the images there...
 if(!restoreFailedStorage || removedStorageDirs.size() == 0)
  return; //nothing to restore
 /* We don't want more than one thread trying to restore at a time */
 synchronized (this.restorationLock) {
  // NOTE(review): "storarge" is a typo carried in the original log string;
  // it is runtime output, so it is preserved here as-is.
  LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
       "storarge. removedStorages size = " + removedStorageDirs.size());
  // NOTE(review): removedStorageDirs.remove(sd) is called below while an
  // explicit iterator over that same list is active. This pattern is only
  // safe with a concurrent list (e.g. CopyOnWriteArrayList, whose
  // snapshot iterator tolerates external removal but rejects it.remove());
  // a plain ArrayList would raise ConcurrentModificationException —
  // verify the field's declared type before refactoring.
  for(Iterator<StorageDirectory> it
     = this.removedStorageDirs.iterator(); it.hasNext();) {
   StorageDirectory sd = it.next();
   File root = sd.getRoot();
   LOG.info("currently disabled dir " + root.getAbsolutePath() +
        "; type="+sd.getStorageDirType() 
        + ";canwrite="+FileUtil.canWrite(root));
   // Put the directory back into service if it exists and is writable.
   if(root.exists() && FileUtil.canWrite(root)) {
    LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
    this.addStorageDir(sd); // restore
    this.removedStorageDirs.remove(sd);
   }
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-nodemanager

/**
 * Determines the CPU controller's cgroup path, preferring an explicitly
 * configured mount path over parsing the mtab, then verifies the prefixed
 * cgroup directory is writable and records it in {@code controllerPaths}.
 *
 * @throws IOException if no CPU controller mount can be found, or the
 *   cgroup directory cannot be written
 */
private void initializeControllerPaths() throws IOException {
 Map<String, Set<String>> mounts = null;
 // A configured mount path (without auto-mounting) takes precedence.
 if (this.cgroupMountPath != null && !this.cgroupMount) {
  mounts = ResourceHandlerModule.
    parseConfiguredCGroupPath(this.cgroupMountPath);
 }
 if (mounts == null) {
  mounts = parseMtab();
 }
 // CPU — fail-fast guards replace the original nested if/else.
 String cpuPath = findControllerInMtab(CONTROLLER_CPU, mounts);
 if (cpuPath == null) {
  throw new IOException("Not able to enforce cpu weights; cannot find "
    + "cgroup for cpu controller in " + getMtabFileName());
 }
 File cgroupDir = new File(cpuPath + "/" + this.cgroupPrefix);
 if (!FileUtil.canWrite(cgroupDir)) {
  throw new IOException("Not able to enforce cpu weights; cannot write "
    + "to cgroup at: " + cpuPath);
 }
 controllerPaths.put(CONTROLLER_CPU, cpuPath);
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Ensures the current process has read, write, and execute access to the
 * given directory, relying on {@link File}-object permission methods.
 *
 * @param dir directory to check
 * @throws DiskErrorException when {@code dir} is not a directory or lacks
 *   read, write, or execute permission
 */
private static void checkAccessByFileMethods(File dir)
  throws DiskErrorException {
 // One path string reused in every error message below.
 final String where = dir.toString();
 if (!dir.isDirectory()) {
  throw new DiskErrorException("Not a directory: " + where);
 }
 if (!FileUtil.canRead(dir)) {
  throw new DiskErrorException("Directory is not readable: " + where);
 }
 if (!FileUtil.canWrite(dir)) {
  throw new DiskErrorException("Directory is not writable: " + where);
 }
 if (!FileUtil.canExecute(dir)) {
  throw new DiskErrorException("Directory is not executable: " + where);
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
  * Checks that the current running process can read, write, and execute the
  * given directory by using methods of the File object.
  * 
  * @param dir File to check
  * @throws DiskErrorException if dir is not readable, not writable, or not
  *   executable
  */
 private static void checkAccessByFileMethods(File dir)
   throws DiskErrorException {
  if (!FileUtil.canRead(dir)) {
   throw new DiskErrorException("Directory is not readable: "
                  + dir.toString());
  }

  if (!FileUtil.canWrite(dir)) {
   throw new DiskErrorException("Directory is not writable: "
                  + dir.toString());
  }

  if (!FileUtil.canExecute(dir)) {
   throw new DiskErrorException("Directory is not executable: "
                  + dir.toString());
  }
 }
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
  * Checks that the current running process can read, write, and execute the
  * given directory by using methods of the File object.
  * 
  * @param dir File to check
  * @throws DiskErrorException if dir is not readable, not writable, or not
  *   executable
  */
 private static void checkAccessByFileMethods(File dir)
   throws DiskErrorException {
  if (!FileUtil.canRead(dir)) {
   throw new DiskErrorException("Directory is not readable: "
                  + dir.toString());
  }

  if (!FileUtil.canWrite(dir)) {
   throw new DiskErrorException("Directory is not writable: "
                  + dir.toString());
  }

  if (!FileUtil.canExecute(dir)) {
   throw new DiskErrorException("Directory is not executable: "
                  + dir.toString());
  }
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
  * Checks that the current running process can read, write, and execute the
  * given directory by using methods of the File object.
  * 
  * @param dir File to check
  * @throws DiskErrorException if dir is not readable, not writable, or not
  *   executable
  */
 private static void checkAccessByFileMethods(File dir)
   throws DiskErrorException {
  if (!FileUtil.canRead(dir)) {
   throw new DiskErrorException("Directory is not readable: "
                  + dir.toString());
  }

  if (!FileUtil.canWrite(dir)) {
   throw new DiskErrorException("Directory is not writable: "
                  + dir.toString());
  }

  if (!FileUtil.canExecute(dir)) {
   throw new DiskErrorException("Directory is not executable: "
                  + dir.toString());
  }
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * @return a debug string which can help diagnose an error of why
 * a given directory might have a permissions error in the context
 * of a test case
 */
private String createPermissionsDiagnosisString(File path) {
 StringBuilder sb = new StringBuilder();
 // Walk from the given path up to the filesystem root, emitting one
 // ls-style permission line (e.g. "drwx") per ancestor.
 for (File p = path; p != null; p = p.getParentFile()) {
  // Chained appends: the original concatenated strings inside append(),
  // which builds a throwaway String per call and defeats the builder.
  sb.append("path '").append(p).append("': ").append("\n");
  sb.append("\tabsolute:").append(p.getAbsolutePath()).append("\n");
  sb.append("\tpermissions: ");
  sb.append(p.isDirectory() ? "d" : "-");
  sb.append(FileUtil.canRead(p) ? "r" : "-");
  sb.append(FileUtil.canWrite(p) ? "w" : "-");
  sb.append(FileUtil.canExecute(p) ? "x" : "-");
  sb.append("\n");
 }
 return sb.toString();
}

相关文章