Usage of the org.apache.hadoop.hive.ql.exec.Utilities.getSparkTasks() method, with code examples


This article collects Java code examples for the org.apache.hadoop.hive.ql.exec.Utilities.getSparkTasks() method and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should make useful references. Details of the Utilities.getSparkTasks() method are as follows:
Package path: org.apache.hadoop.hive.ql.exec.Utilities
Class name: Utilities
Method name: getSparkTasks

About Utilities.getSparkTasks

No description is provided upstream. Judging from the implementation shown below, the method collects every SparkTask reachable from a given list of root tasks, recursing through dependent tasks and skipping duplicates.

Code examples

Example source: apache/drill

// Collects every SparkTask reachable from the given root tasks.
public static List<SparkTask> getSparkTasks(List<Task<? extends Serializable>> tasks) {
 List<SparkTask> sparkTasks = new ArrayList<SparkTask>();
 if (tasks != null) {
  getSparkTasks(tasks, sparkTasks); // delegate to the recursive helper
 }
 return sparkTasks;
}
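
For orientation, here is a minimal usage sketch. The SparkTaskInspector class and its usesSpark method are made up for illustration (they are not part of Hive); the sketch assumes a compiled QueryPlan from a query run with Spark as the execution engine:

import java.io.Serializable;
import java.util.List;

import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.spark.SparkTask;

public class SparkTaskInspector {
  // Returns true if the compiled plan contains at least one Spark task.
  public static boolean usesSpark(QueryPlan plan) {
    List<Task<? extends Serializable>> roots = plan.getRootTasks();
    List<SparkTask> sparkTasks = Utilities.getSparkTasks(roots);
    return !sparkTasks.isEmpty();
  }
}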

Example source: apache/hive

public static int getNumClusterJobs(List<Task<? extends Serializable>> tasks) {
 return getMRTasks(tasks).size() + getTezTasks(tasks).size() + getSparkTasks(tasks).size();
}

Example source: apache/drill

// Depth-first walk over the task DAG; the contains() check keeps a task that
// is reachable along multiple paths from being collected more than once.
private static void getSparkTasks(List<Task<? extends Serializable>> tasks,
 List<SparkTask> sparkTasks) {
 for (Task<? extends Serializable> task : tasks) {
  if (task instanceof SparkTask && !sparkTasks.contains(task)) {
   sparkTasks.add((SparkTask) task);
  }
  if (task.getDependentTasks() != null) {
   getSparkTasks(task.getDependentTasks(), sparkTasks);
  }
 }
}
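
The helper walks the task DAG depth-first: every task's dependents are visited recursively, and the contains() check keeps a task from being collected twice when it is reachable along several paths. The following self-contained toy sketch shows the same add-if-absent pattern on a diamond-shaped graph; Node and DiamondWalk are invented names for illustration, not Hive classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class Node {
  final String name;
  final List<Node> children = new ArrayList<>();
  Node(String name) { this.name = name; }
}

public class DiamondWalk {
  // Same pattern as the helper above: add if absent, then recurse.
  static void collect(List<Node> nodes, List<Node> out) {
    for (Node n : nodes) {
      if (!out.contains(n)) {
        out.add(n);
      }
      collect(n.children, out);
    }
  }

  public static void main(String[] args) {
    Node top = new Node("top");
    Node left = new Node("left");
    Node right = new Node("right");
    Node bottom = new Node("bottom");
    top.children.addAll(Arrays.asList(left, right));
    left.children.add(bottom);
    right.children.add(bottom); // diamond: bottom is reachable twice
    List<Node> collected = new ArrayList<>();
    collect(Arrays.asList(top), collected);
    System.out.println(collected.size()); // prints 4: bottom is collected once
  }
}

Note that this simple pattern still re-traverses the subtree below a shared node once per incoming path even though the node itself is collected once; the apache/hive unit test further down guards against such repeat visits in Hive's own implementation.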

Example source: apache/hive

private ExecutionMode getExecutionMode(QueryPlan plan, List<ExecDriver> mrTasks,
  List<TezTask> tezTasks) {
 if (tezTasks.size() > 0) {
  // Need to go in and check if any of the tasks is running in LLAP mode.
  for (TezTask tezTask : tezTasks) {
   if (tezTask.getWork().getLlapMode()) {
    return ExecutionMode.LLAP;
   }
  }
  return ExecutionMode.TEZ;
 } else if (mrTasks.size() > 0) {
  return ExecutionMode.MR;
 } else if (Utilities.getSparkTasks(plan.getRootTasks()).size() > 0) {
  return ExecutionMode.SPARK;
 } else {
  return ExecutionMode.NONE;
 }
}

Example source: apache/hive

protected ExecutionMode getExecutionMode(QueryPlan plan) {
 int numMRJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
 int numSparkJobs = Utilities.getSparkTasks(plan.getRootTasks()).size();
 int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
 ExecutionMode mode = ExecutionMode.MR;
 if (0 == (numMRJobs + numSparkJobs + numTezJobs)) {
  mode = ExecutionMode.NONE;
 } else if (numSparkJobs > 0) {
  return ExecutionMode.SPARK;
 } else if (numTezJobs > 0) {
  mode = ExecutionMode.TEZ;
  // Need to go in and check if any of the tasks is running in LLAP mode.
  for (TezTask tezTask : Utilities.getTezTasks(plan.getRootTasks())) {
   if (tezTask.getWork().getLlapMode()) {
    mode = ExecutionMode.LLAP;
    break;
   }
  }
 }
 return mode;
}
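
Note that this overload gives Spark the highest precedence: any SparkTask makes the whole query SPARK mode, even when MR tasks are also present, whereas the two-list getExecutionMode overload earlier checks Tez before Spark. A hypothetical distillation of just the precedence logic, with pickMode and ModePrecedence as invented names:

public class ModePrecedence {
  enum ExecutionMode { NONE, MR, TEZ, LLAP, SPARK }

  // Mirrors the branch order above: NONE, then SPARK, then TEZ/LLAP, else MR.
  static ExecutionMode pickMode(int mrJobs, int sparkJobs, int tezJobs, boolean anyLlapTask) {
    if (mrJobs + sparkJobs + tezJobs == 0) {
      return ExecutionMode.NONE;
    }
    if (sparkJobs > 0) {
      return ExecutionMode.SPARK;
    }
    if (tezJobs > 0) {
      return anyLlapTask ? ExecutionMode.LLAP : ExecutionMode.TEZ;
    }
    return ExecutionMode.MR;
  }

  public static void main(String[] args) {
    System.out.println(pickMode(2, 1, 0, false)); // SPARK: Spark outranks MR
    System.out.println(pickMode(0, 0, 3, true));  // LLAP
  }
}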

Example source: apache/drill

protected ExecutionMode getExecutionMode(QueryPlan plan) {
 int numMRJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
 int numSparkJobs = Utilities.getSparkTasks(plan.getRootTasks()).size();
 int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
 ExecutionMode mode = ExecutionMode.MR;
 if (0 == (numMRJobs + numSparkJobs + numTezJobs)) {
  mode = ExecutionMode.NONE;
 } else if (numSparkJobs > 0) {
  return ExecutionMode.SPARK;
 } else if (numTezJobs > 0) {
  mode = ExecutionMode.TEZ;
  // Need to go in and check if any of the tasks is running in LLAP mode.
  for (TezTask tezTask : Utilities.getTezTasks(plan.getRootTasks())) {
   if (tezTask.getWork().getLlapMode()) {
    mode = ExecutionMode.LLAP;
    break;
   }
  }
 }
 return mode;
}

Example source: apache/hive

List<SparkTask> sparkTasks = Utilities.getSparkTasks(tasks);
if (!sparkTasks.isEmpty()) {
 for (SparkTask sparkTask : sparkTasks) {

Example source: apache/hive

List<SparkTask> sparkTasks = Utilities.getSparkTasks(driver.getPlan().getRootTasks());
Assert.assertEquals(1, sparkTasks.size());

Example source: apache/hive

int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size()
  + Utilities.getSparkTasks(plan.getRootTasks()).size();
if (jobs > 0) {
 logMrWarning(mrJobs);

Example source: apache/drill

int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size()
  + Utilities.getSparkTasks(plan.getRootTasks()).size();
if (jobs > 0) {
 logMrWarning(mrJobs);

Example source: apache/hive

/**
 * Verifies that the Utilities.get*Tasks methods do not return duplicates when
 * extracting tasks from a set of root tasks whose DAG contains multiple paths
 * to the same task, as with the diamond-shaped DAGs common to replication.
 */
@Test
public void testGetTasksHaveNoRepeats() {
 CountingWrappingTask mrTask = new CountingWrappingTask(new ExecDriver());
 CountingWrappingTask tezTask = new CountingWrappingTask(new TezTask());
 CountingWrappingTask sparkTask = new CountingWrappingTask(new SparkTask());
 // First check - we should not have repeats in results
 assertEquals("No repeated MRTasks from Utilities.getMRTasks", 1,
   Utilities.getMRTasks(getTestDiamondTaskGraph(mrTask)).size());
 assertEquals("No repeated TezTasks from Utilities.getTezTasks", 1,
   Utilities.getTezTasks(getTestDiamondTaskGraph(tezTask)).size());
 assertEquals("No repeated SparkTasks from Utilities.getSparkTasks", 1,
   Utilities.getSparkTasks(getTestDiamondTaskGraph(sparkTask)).size());
 // Second check - the tasks we looked for must not have been accessed more
 // than once as a result of the traversal (note that we actually wind up
 // accessing them twice: once to check for existence, and once to visit).
 assertEquals("MRTasks should have been visited only once", 2, mrTask.getDepCallCount());
 assertEquals("TezTasks should have been visited only once", 2, tezTask.getDepCallCount());
 assertEquals("SparkTasks should have been visited only once", 2, sparkTask.getDepCallCount());
}

Example source: apache/drill

tsk.setRetryCmdWhenFail(true);
List<SparkTask> sparkTasks = Utilities.getSparkTasks(rootTasks);
for (SparkTask sparkTask : sparkTasks) {
 sparkTask.setRetryCmdWhenFail(true);

Example source: com.facebook.presto.hive/hive-apache

public static List<SparkTask> getSparkTasks(List<Task<? extends Serializable>> tasks) {
 List<SparkTask> sparkTasks = new ArrayList<SparkTask>();
 if (tasks != null) {
  getSparkTasks(tasks, sparkTasks);
 }
 return sparkTasks;
}

Example source: com.facebook.presto.hive/hive-apache

private static void getSparkTasks(List<Task<? extends Serializable>> tasks,
 List<SparkTask> sparkTasks) {
 for (Task<? extends Serializable> task : tasks) {
  if (task instanceof SparkTask && !sparkTasks.contains(task)) {
   sparkTasks.add((SparkTask) task);
  }
  if (task.getDependentTasks() != null) {
   getSparkTasks(task.getDependentTasks(), sparkTasks);
  }
 }
}

Example source: com.facebook.presto.hive/hive-apache

+ Utilities.getSparkTasks(plan.getRootTasks()).size();
if (jobs > 0) {
 console.printInfo("Query ID = " + plan.getQueryId());

Example source: com.facebook.presto.hive/hive-apache

tsk.setRetryCmdWhenFail(true);
List<SparkTask> sparkTasks = Utilities.getSparkTasks(rootTasks);
for (SparkTask sparkTask : sparkTasks) {
 sparkTask.setRetryCmdWhenFail(true);
