This article collects code examples of the Java method org.apache.hadoop.fs.FileUtil.replaceFile(), showing how FileUtil.replaceFile() is used in practice. The examples come from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they are reasonably representative and should be useful as references. Details of the FileUtil.replaceFile() method:
Package path: org.apache.hadoop.fs.FileUtil
Class name: FileUtil
Method name: replaceFile
Description: Move the src file to the name specified by target.
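Before the per-project examples, here is a minimal usage sketch. It is illustrative only and not taken from any of the projects below; the file paths are hypothetical. FileUtil.replaceFile(src, target) moves src to target, replacing an existing target file (the test example from ch.cern.hadoop/hadoop-common further below exercises that behavior):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class ReplaceFileExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical paths: a fully written temporary file that should take
    // the place of the final file in a single rename step.
    File tmp = new File("/tmp/data.bin.tmp");
    File target = new File("/tmp/data.bin");

    // Moves tmp to target, replacing target if it already exists as a file.
    // Throws IOException if the move cannot be completed.
    FileUtil.replaceFile(tmp, target);
  }
}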
Code example source: org.apache.hadoop/hadoop-common
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Move the src file to the target using
 * {@link FileUtil#replaceFile(File, File)}.
 *
 * @param volume target volume. null if unavailable.
 * @param src source path.
 * @param target target path.
 * @throws IOException
 */
public void replaceFile(
    @Nullable FsVolumeSpi volume, File src, File target) throws IOException {
  final long begin = profilingEventHook.beforeMetadataOp(volume, MOVE);
  try {
    faultInjectorEventHook.beforeMetadataOp(volume, MOVE);
    FileUtil.replaceFile(src, target);
    profilingEventHook.afterMetadataOp(volume, MOVE, begin);
  } catch (Exception e) {
    onFailure(volume, begin);
    throw e;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
LOG.info("Rolling forward previously half-completed synchronization: " +
tmp + " -> " + dst + " ; journal id: " + journalId);
FileUtil.replaceFile(tmp, dst);
Code example source: org.apache.hadoop/hadoop-hdfs
FileUtil.replaceFile(syncedFile,
storage.getInProgressEditLog(segmentTxId));
Code example source: com.facebook.hadoop/hadoop-core
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the detachDir. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void detachFile(int namespaceId, File file, Block b) throws IOException {
  File tmpFile = volume.createDetachFile(namespaceId, b, file.getName());
  try {
    IOUtils.copyBytes(new FileInputStream(file),
                      new FileOutputStream(tmpFile),
                      16 * 1024, true);
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length() +
          " into file " + tmpFile +
          " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
          tmpFile);
    }
    throw e;
  }
}
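The javadoc above describes the copy-then-rename pattern that detachFile uses to break hard links: write a fresh copy, then swap it in over the original name with FileUtil.replaceFile(). Outside of Hadoop, the same idea can be sketched with the standard java.nio.file API. This is a minimal illustration only; the class name, method name, and temporary-file suffix are made up here:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class DetachSketch {
  /**
   * Copy 'file' to a temporary sibling, then rename the copy over the
   * original name. The name then points at a fresh copy, so it no longer
   * shares storage with any pre-existing hard links to the old data.
   */
  static void detach(Path file) throws IOException {
    // Hypothetical temporary-file suffix, chosen only for this sketch.
    Path tmp = file.resolveSibling(file.getFileName() + ".detach.tmp");
    Files.copy(file, tmp, StandardCopyOption.REPLACE_EXISTING);
    Files.move(tmp, file, StandardCopyOption.REPLACE_EXISTING);
  }
}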
Code example source: org.jvnet.hudson.hadoop/hadoop-core
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the detachDir. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void detachFile(File file, Block b) throws IOException {
  File tmpFile = volume.createDetachFile(b, file.getName());
  try {
    IOUtils.copyBytes(new FileInputStream(file),
                      new FileOutputStream(tmpFile),
                      16 * 1024, true);
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length() +
          " into file " + tmpFile +
          " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
          tmpFile);
    }
    throw e;
  }
}
Code example source: ch.cern.hadoop/hadoop-hdfs
" resulted in a size of " + tmpFile.length());
FileUtil.replaceFile(tmpFile, file);
} catch (IOException e) {
boolean done = tmpFile.delete();
Code example source: io.prestosql.hadoop/hadoop-apache
" resulted in a size of " + tmpFile.length());
FileUtil.replaceFile(tmpFile, file);
} catch (IOException e) {
boolean done = tmpFile.delete();
Code example source: io.hops/hadoop-common
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
Code example source: ch.cern.hadoop/hadoop-common
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
Code example source: io.prestosql.hadoop/hadoop-apache
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
Code example source: com.github.jiayuhan-it/hadoop-common
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
Code example source: org.apache.hadoop/hadoop-mapred-test
FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, HADOOP_POLICY_FILE));
Code example source: ch.cern.hadoop/hadoop-hdfs
LOG.info("Rolling forward previously half-completed synchronization: " +
tmp + " -> " + dst);
FileUtil.replaceFile(tmp, dst);
Code example source: io.prestosql.hadoop/hadoop-apache
LOG.info("Rolling forward previously half-completed synchronization: " +
tmp + " -> " + dst);
FileUtil.replaceFile(tmp, dst);
Code example source: ch.cern.hadoop/hadoop-common
final File targetFile = new File(tmp, "target");
assertTrue(!targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
  FileUtil.replaceFile(srcFile, targetFile);
  assertTrue(false);
} catch (IOException ioe) {
Code example source: com.github.jiayuhan-it/hadoop-common
final File targetFile = new File(tmp, "target");
assertTrue(!targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
  FileUtil.replaceFile(srcFile, targetFile);
  assertTrue(false);
} catch (IOException ioe) {
Code example source: ch.cern.hadoop/hadoop-hdfs
FileUtil.replaceFile(syncedFile,
storage.getInProgressEditLog(segmentTxId));
Code example source: io.prestosql.hadoop/hadoop-apache
FileUtil.replaceFile(syncedFile,
storage.getInProgressEditLog(segmentTxId));