This article collects code examples of the Java method org.apache.hadoop.tools.DistCp.getConf() and shows how DistCp.getConf() is used in practice. The examples are drawn from selected projects on GitHub/Stack Overflow/Maven and should serve as solid references. Details of the DistCp.getConf() method:
Package path: org.apache.hadoop.tools.DistCp
Class name: DistCp
Method name: getConf
Description: none provided
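DistCp extends Configured, so getConf() returns the tool's active Configuration. A minimal sketch of obtaining it (the paths and the two-argument DistCpOptions constructor are illustrative assumptions from the Hadoop 2.x line; Hadoop 3.x builds options through DistCpOptions.Builder, and recent DistCp constructors copy the supplied Configuration and layer distcp-default.xml on top):

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpOptions;

Configuration conf = new Configuration();
// Hypothetical source and target paths, for illustration only.
DistCpOptions options = new DistCpOptions(
    Collections.singletonList(new Path("hdfs://nn:8020/src")),
    new Path("hdfs://nn:8020/dst"));
DistCp distCp = new DistCp(conf, options);
// The Configuration that the snippets below operate on:
Configuration jobConf = distCp.getConf();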
Code example source: apache/hbase
// Sum the byte lengths of all source paths (excerpt; totalSrcLgth is a local accumulator).
long totalSrcLgth = 0;
for (Path aSrc : srcs) {
  totalSrcLgth +=
      BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
}
Code example source: ch.cern.hadoop/hadoop-distcp
/**
 * Set targetPathExists in both inputOptions and job config,
 * for the benefit of CopyCommitter
 */
private void setTargetPathExists() throws IOException {
  Path target = inputOptions.getTargetPath();
  FileSystem targetFS = target.getFileSystem(getConf());
  boolean targetExists = targetFS.exists(target);
  inputOptions.setTargetPathExists(targetExists);
  getConf().setBoolean(DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS,
      targetExists);
}
Code example source: org.apache.hadoop/hadoop-distcp
/**
 * Set targetPathExists in both inputOptions and job config,
 * for the benefit of CopyCommitter
 */
private void setTargetPathExists() throws IOException {
  Path target = context.getTargetPath();
  FileSystem targetFS = target.getFileSystem(getConf());
  boolean targetExists = targetFS.exists(target);
  context.setTargetPathExists(targetExists);
  getConf().setBoolean(DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS,
      targetExists);
}
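CopyCommitter can later read this flag back from the job configuration. A minimal sketch, assuming the same DistCpConstants key and the jobConf from the first snippet (the default value here is illustrative):

// Read the flag recorded by setTargetPathExists().
boolean targetExisted = jobConf.getBoolean(
    DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);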
Code example source: ch.cern.hadoop/hadoop-distcp
/**
 * Create a default working folder for the job, under the
 * job staging directory
 *
 * @return Returns the working folder information
 * @throws Exception - Exception if any
 */
private Path createMetaFolderPath() throws Exception {
  Configuration configuration = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(
      new Cluster(configuration), configuration);
  Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
  if (LOG.isDebugEnabled())
    LOG.debug("Meta folder location: " + metaFolderPath);
  configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
  return metaFolderPath;
}
Code example source: org.apache.hadoop/hadoop-distcp
/**
 * Create a default working folder for the job, under the
 * job staging directory
 *
 * @return Returns the working folder information
 * @throws Exception - Exception if any
 */
private Path createMetaFolderPath() throws Exception {
  Configuration configuration = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(
      new Cluster(configuration), configuration);
  Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
  if (LOG.isDebugEnabled())
    LOG.debug("Meta folder location: " + metaFolderPath);
  configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
  return metaFolderPath;
}
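The meta folder location is stashed under DistCpConstants.CONF_LABEL_META_FOLDER so that later stages (for example the copy-listing setup) can find it. A sketch of reading it back, reusing jobConf from the first snippet:

// Recover the working folder recorded by createMetaFolderPath().
Path metaFolder = new Path(jobConf.get(DistCpConstants.CONF_LABEL_META_FOLDER));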
Code example source: org.apache.hadoop/hadoop-distcp
/**
 * Create Job object for submitting it, with all the configuration
 *
 * @return Reference to job object.
 * @throws IOException - Exception if any
 */
private Job createJob() throws IOException {
  String jobName = "distcp";
  String userChosenName = getConf().get(JobContext.JOB_NAME);
  if (userChosenName != null)
    jobName += ": " + userChosenName;
  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(DistCpUtils.getStrategy(getConf(), context));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);
  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(JobContext.NUM_MAPS,
      String.valueOf(context.getMaxMaps()));
  context.appendToConf(job.getConfiguration());
  return job;
}
Code example source: ch.cern.hadoop/hadoop-distcp
/**
 * Create Job object for submitting it, with all the configuration
 *
 * @return Reference to job object.
 * @throws IOException - Exception if any
 */
private Job createJob() throws IOException {
  String jobName = "distcp";
  String userChosenName = getConf().get(JobContext.JOB_NAME);
  if (userChosenName != null)
    jobName += ": " + userChosenName;
  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(DistCpUtils.getStrategy(getConf(), inputOptions));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);
  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(JobContext.NUM_MAPS,
      String.valueOf(inputOptions.getMaxMaps()));
  if (inputOptions.getSslConfigurationFile() != null) {
    setupSSLConfig(job);
  }
  inputOptions.appendToConf(job.getConfiguration());
  return job;
}
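Both variants prefix a user-chosen job name: presetting JobContext.JOB_NAME ("mapreduce.job.name") before the tool runs makes the submitted job show up as "distcp: <name>". A sketch, reusing conf from the first snippet:

// Hypothetical job name; createJob() turns it into "distcp: nightly-backup".
conf.set(JobContext.JOB_NAME, "nightly-backup");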
Code example source: org.apache.hadoop/hadoop-distcp
/**
 * Check splitting large files is supported and populate configs.
 */
private void checkSplitLargeFile() throws IOException {
  if (!context.splitLargeFile()) {
    return;
  }
  final Path target = context.getTargetPath();
  final FileSystem targetFS = target.getFileSystem(getConf());
  try {
    // Probe concat() support with null arguments: file systems that do not
    // implement concat throw UnsupportedOperationException before touching
    // the (null) paths; any other exception is ignored.
    Path[] src = null;
    Path tgt = null;
    targetFS.concat(tgt, src);
  } catch (UnsupportedOperationException use) {
    throw new UnsupportedOperationException(
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch() +
        " is not supported since the target file system doesn't" +
        " support concat.", use);
  } catch (Exception e) {
    // Ignore other exception
  }
  LOG.info("Set " +
      DistCpConstants.CONF_LABEL_SIMPLE_LISTING_RANDOMIZE_FILES
      + " to false since " + DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch()
      + " is passed.");
  getConf().setBoolean(
      DistCpConstants.CONF_LABEL_SIMPLE_LISTING_RANDOMIZE_FILES, false);
}
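splitLargeFile() reflects the -blocksperchunk option. A sketch of enabling it programmatically, assuming the Hadoop 3.x DistCpOptions.Builder API (the chunk size and paths are illustrative):

// Hypothetical: copy large files in 128-block chunks; this requires concat()
// support on the target file system, as checked above.
DistCpOptions chunked = new DistCpOptions.Builder(
    Collections.singletonList(new Path("hdfs://nn:8020/src")),
    new Path("hdfs://nn:8020/dst"))
    .withBlocksPerChunk(128)
    .build();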
Code example source: org.apache.hadoop/hadoop-distcp
/**
 * Create and submit the mapreduce job.
 * @return The mapreduce job object that has been submitted
 */
public Job createAndSubmitJob() throws Exception {
  assert context != null;
  assert getConf() != null;
  Job job = null;
  try {
    synchronized(this) {
      //Don't cleanup while we are setting up.
      metaFolder = createMetaFolderPath();
      jobFS = metaFolder.getFileSystem(getConf());
      job = createJob();
    }
    prepareFileListing(job);
    job.submit();
    submitted = true;
  } finally {
    if (!submitted) {
      cleanup();
    }
  }
  String jobID = job.getJobID().toString();
  job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID,
      jobID);
  LOG.info("DistCp job-id: " + jobID);
  return job;
}
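createAndSubmitJob() returns as soon as the job is submitted; callers that need the result block on it afterwards. A sketch, assuming the public waitForJobCompletion(Job) of recent Hadoop versions (DistCp.execute() combines these two steps internally):

Job job = distCp.createAndSubmitJob();
distCp.waitForJobCompletion(job);  // throws if the copy job fails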
Code example source: ch.cern.hadoop/hadoop-distcp
// Excerpt (the surrounding method body is elided in the source scrape):
assert getConf() != null;
Job job = null;
try {
  jobFS = metaFolder.getFileSystem(getConf());
  job = createJob();
  // Fall back to a full copy listing when the snapshot-diff sync fails.
  if (!DistCpSync.sync(inputOptions, getConf())) {
    inputOptions.disableUsingDiff();
  }
  // ...
} finally {
  // ...
}
Code example source: org.apache.hadoop/hadoop-distcp
private void prepareFileListing(Job job) throws Exception {
  if (context.shouldUseSnapshotDiff()) {
    // When "-diff" or "-rdiff" is passed, do sync() first, then
    // create copyListing based on snapshot diff.
    DistCpSync distCpSync = new DistCpSync(context, getConf());
    if (distCpSync.sync()) {
      createInputFileListingWithDiff(job, distCpSync);
    } else {
      throw new Exception("DistCp sync failed, input options: " + context);
    }
  } else {
    // When no "-diff" or "-rdiff" is passed, create copyListing
    // in regular way.
    createInputFileListing(job);
  }
}
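The snapshot-diff branch corresponds to the -diff/-rdiff command-line options. A sketch of enabling -diff programmatically, again assuming the DistCpOptions.Builder API and hypothetical snapshot names:

// Hypothetical: copy only what changed between source snapshots "s1" and "s2".
DistCpOptions diffOptions = new DistCpOptions.Builder(
    Collections.singletonList(new Path("hdfs://nn:8020/src")),
    new Path("hdfs://nn:8020/dst"))
    .withSyncFolder(true)      // -diff requires -update
    .withUseDiff("s1", "s2")
    .build();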