本文整理了Java中org.apache.hadoop.mapred.TaskReport
类的一些代码示例,展示了TaskReport
类的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。TaskReport
类的具体详情如下:
包路径:org.apache.hadoop.mapred.TaskReport
类名称:TaskReport
[英]A report on the state of a task.
[中]关于任务状态的报告。
代码示例来源:origin: twitter/ambrose
if (report.getStartTime()
TIPStatus status = report.getCurrentStatus();
if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
finishedMappersCount++;
if (jobLastUpdateTime
TIPStatus status = report.getCurrentStatus();
if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
finishedReducersCount++;
代码示例来源:origin: linkedin/camus
if (tasks.length > 0) {
for (TaskReport task : client.getMapTaskReports(tasks[0].getTaskAttemptId().getJobID())) {
if (task.getCurrentStatus().equals(TIPStatus.FAILED)) {
for (String s : task.getDiagnostics()) {
System.err.println("task error: " + s);
代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core
/**
 * Structural equality: two reports are equal when every reported field
 * (counters, diagnostics, finish/start time, progress, state and task id)
 * matches. Only exact {@code TaskReport} instances compare equal; subclasses
 * never do. Diagnostics are compared via their string rendering, which is
 * order-sensitive.
 * NOTE(review): hashCode is not visible in this chunk — confirm it is
 * overridden consistently elsewhere in the class.
 */
@Override
public boolean equals(Object o) {
  // Reject null and anything that is not exactly a TaskReport.
  if (o == null || !o.getClass().equals(TaskReport.class)) {
    return false;
  }
  TaskReport that = (TaskReport) o;
  boolean sameDiagnostics =
      Arrays.toString(this.diagnostics).equals(Arrays.toString(that.getDiagnostics()));
  return counters.contentEquals(that.getCounters())
      && sameDiagnostics
      && this.finishTime == that.getFinishTime()
      && this.progress == that.getProgress()
      && this.startTime == that.getStartTime()
      && this.state.equals(that.getState())
      && this.taskid.equals(that.getTaskID());
}
代码示例来源:origin: cdapio/cdap
private List
List
for (TaskReport taskReport : taskReports) {
taskInfos.add(new MRTaskInfo(taskReport.getTaskId(), taskReport.getState(),
taskReport.getStartTime(), taskReport.getFinishTime(), taskReport.getProgress(),
groupToMap(taskReport.getCounters().getGroup(TaskCounter.class.getName()))));
}
return taskInfos;
}
代码示例来源:origin: com.linkedin.camus/camus-etl-kafka
代码示例来源:origin: apache/datafu 代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core 代码示例来源:origin: stackoverflow.com 代码示例来源:origin: org.apache.hadoop/hadoop-mapreduce-client-core 代码示例来源:origin: com.facebook.hadoop/hadoop-core 代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-core 代码示例来源:origin: org.apache.hadoop/hadoop-mapred-test 代码示例来源:origin: org.apache.hadoop/hadoop-mapred 代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-core 代码示例来源:origin: com.facebook.hadoop/hadoop-core 代码示例来源:origin: LiveRamp/cascading_ext 代码示例来源:origin: org.apache.hadoop/hadoop-mapred-test 代码示例来源:origin: org.apache.hadoop/hadoop-mapred-test 代码示例来源:origin: com.facebook.hadoop/hadoop-core 代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclientlong wait = t.getStartTime() - timingMap.get("hadoop_start");
min = wait
mean += wait;
long runTime = t.getFinishTime() - t.getStartTime();
totalTaskTime += runTime;
minRun = runTime for (TaskReport report : jobClient.getSetupTaskReports(oldJobId))
taskIdToType.put(report.getTaskID().toString(),"SETUP");
if (report.getStartTime() == 0)
setupStart = Math.min(setupStart, report.getStartTime());
for (TaskReport report : mapReports)
taskIdToType.put(report.getTaskID().toString(),"MAP");
if (report.getFinishTime() == 0 || report.getStartTime() == 0)
minStart = Math.min(minStart, report.getStartTime());
mapStats.addValue(report.getFinishTime() - report.getStartTime());
for (TaskReport report : reduceReports)
taskIdToType.put(report.getTaskID().toString(),"REDUCE");
if (report.getFinishTime() == 0 || report.getStartTime() == 0)
maxFinish = Math.max(maxFinish, report.getFinishTime());
reduceStats.addValue(report.getFinishTime() - report.getStartTime());
taskIdToType.put(report.getTaskID().toString(),"CLEANUP");
if (report.getFinishTime() == 0)
cleanupFinish = Math.max(cleanupFinish, report.getFinishTime());/**
* Creates a "status report" for this task. Includes the
* task ID and overall status, plus reports for all the
* component task-threads that have ever been started.
*/
synchronized TaskReport generateSingleReport() {
ArrayList
for (List
diagnostics.addAll(l);
}
TaskReport report = new TaskReport
(getTIPId(), (float)progress, state,
diagnostics.toArray(new String[diagnostics.size()]),
execStartTime, execFinishTime, counters);
return report;
}Job job = ...;
job.waitForCompletion();
TaskReport[] reports = job.getTaskReports(TaskType.MAP);
for(TaskReport report : reports) {
long time = report.getFinishTime() - report.getStartTime();
System.out.println(report.getTaskId() + " took " + time + " millis!");
}

@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  // Build a new-API map TaskID for job 1014873536921/6, task 0, then
  // downgrade it to the old mapred API used by the TaskReport constructor.
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
          State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
          new org.apache.hadoop.mapred.Counters());
  // Fix: JUnit's assertEquals contract is (expected, actual); the original
  // passed them reversed, which makes failure messages report the values
  // backwards. Both accessors must render the same canonical id string.
  String expected = "task_1014873536921_0006_m_000000";
  Assert.assertEquals(expected, treport.getTaskId());
  Assert.assertEquals(expected, treport.getTaskID().toString());
}
}TaskReport report = new TaskReport
(getTIPId(), (float)progress, state,
diagnostics.toArray(new String[diagnostics.size()]),
currentStatus, execStartTime, execFinishTime, counters);
if (currentStatus == TIPStatus.RUNNING) {
report.setRunningTaskAttempts(activeTasks.keySet());
} else if (currentStatus == TIPStatus.COMPLETE) {
report.setSuccessfulAttempt(getSuccessfulTaskid());private TaskReport[] getTaskReports(final JobID jobId, TaskType type) throws
IOException {
// Fetches the job's task reports for the given task type, downgraded to the
// old mapred API. An unknown job yields a shared empty array, never null.
try {
Job j = getJobUsingCluster(jobId);
if(j == null) {
// Job is no longer known to the cluster.
return EMPTY_TASK_REPORTS;
}
return TaskReport.downgradeArray(j.getTaskReports(type));
} catch (InterruptedException ie) {
// NOTE(review): interruption is surfaced as IOException and the thread's
// interrupt flag is not re-set here — confirm callers depend on this.
throw new IOException(ie);
}
}TaskReport mapReports[] = jc.getMapTaskReports(rJob.getID());
for (TaskReport mapReport : mapReports) {
if (mapReport.getCurrentStatus() == TIPStatus.RUNNING) {
runningTasks.addAll(mapReport.getRunningTaskAttempts());
TaskReport reduceReports[] = jc.getReduceTaskReports(rJob.getID());
for (TaskReport reduceReport : reduceReports) {
if (reduceReport.getCurrentStatus() == TIPStatus.RUNNING) {
runningTasks.addAll(reduceReport.getRunningTaskAttempts());/**Computes average progress per bar*/
private float getMapAvarageProgress(int tasksPerBar, int index
, TaskReport[] reports ) {
float progress = 0f;
int k=0;
for(;k
}
progress /= k;
return progress;
}static TaskReport[] downgradeArray(org.apache.hadoop.
mapreduce.TaskReport[] reports) {
List
for (org.apache.hadoop.mapreduce.TaskReport report : reports) {
ret.add(downgrade(report));
}
return ret.toArray(new TaskReport[0]);
}

/**
 * Prints the attempt id(s) relevant to the report's current status:
 * the single successful attempt of a COMPLETE task, or every currently
 * running attempt of a RUNNING task. Other statuses print nothing.
 */
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttempt());
    return;
  }
  if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID attempt : report.getRunningTaskAttempts()) {
      System.out.println(attempt);
    }
  }
}
/**Collection
for(TaskAttemptID attempt: running){
taskAttemptToProgress.put(attempt, report.getProgress());private TaskID getTIPId(MiniMRCluster cluster,
org.apache.hadoop.mapreduce.JobID jobid) throws Exception {
// Polls the cluster until at least one map task report exists, then returns
// the TaskID of the first map task. Assumes the job has exactly one map.
JobClient client = new JobClient(cluster.createJobConf());
// Downgrade the new-API job id to the old mapred API used by JobClient.
JobID jobId = JobID.downgrade(jobid);
TaskReport[] mapReports = null;
TaskID tipId = null;
do { // make sure that the map task is running
Thread.sleep(200);
mapReports = client.getMapTaskReports(jobId);
} while (mapReports.length == 0);
for (TaskReport r : mapReports) {
tipId = r.getTaskID();
break;// because we have only one map
}
return tipId;
}.getMapTaskReports(JobID.downgrade(job.getJobID()))));
for (TaskReport tr : allTaskReports) {
String[] diag = tr.getDiagnostics();
for (String str : diag) {
mat = taskOverLimitPattern.matcher(str);TIPStatus status = report.getCurrentStatus();
if ((state.equals("pending") && status ==TIPStatus.PENDING) ||
(state.equals("running") && status ==TIPStatus.RUNNING) ||@SuppressWarnings("deprecation")
/**
 * Returns the COMMITTED_HEAP_BYTES counter of one task.
 *
 * @param client     job client used to fetch task reports
 * @param id         job whose tasks are inspected
 * @param numReports expected number of reports for the given type
 * @param taskId     index of the task whose counter is read
 * @param type       MAP or REDUCE; any other type fails the null assertion
 * @return committed heap bytes reported by the selected task
 */
private long getTaskCounterUsage (JobClient client, JobID id, int numReports,
    int taskId, TaskType type)
    throws Exception {
  // Only map and reduce tasks carry reports here; anything else (including
  // a null type) leaves the array null and trips the assertion below.
  TaskReport[] reports =
      TaskType.MAP.equals(type) ? client.getMapTaskReports(id)
          : TaskType.REDUCE.equals(type) ? client.getReduceTaskReports(id)
          : null;
  assertNotNull("No reports found for task type '" + type.name()
      + "' in job " + id, reports);
  // The report count must match what the caller expects before indexing.
  assertEquals("Mismatch in task id", numReports, reports.length);
  Counters taskCounters = reports[taskId].getCounters();
  return taskCounters.getCounter(TaskCounter.COMMITTED_HEAP_BYTES);
}