This article collects code examples of the Java class org.apache.hadoop.mapred.JobStatus and shows how JobStatus is used in practice. The examples are extracted from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. Details of the JobStatus class:
Package path: org.apache.hadoop.mapred.JobStatus
Class name: JobStatus
Describes the current status of a job. This is not intended to be a comprehensive piece of data. For that, look at JobProfile.
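Before the per-project examples, here is a minimal, self-contained sketch of the pattern most of them share: obtain JobStatus objects through a JobClient and inspect them. The cluster configuration is a placeholder; only the JobClient/JobStatus calls below are taken from the examples that follow.
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;

public class JobStatusTour {
  public static void main(String[] args) throws Exception {
    // Assumes a cluster reachable through the default JobConf.
    JobClient client = new JobClient(new JobConf());
    try {
      // One JobStatus per job the cluster still remembers.
      JobStatus[] jobs = client.getAllJobs();
      for (JobStatus status : jobs) {
        System.out.println(status.getJobID() + " (" + status.getUsername() + "): "
            + JobStatus.getJobRunState(status.getRunState())
            + ", map " + (int) (status.mapProgress() * 100) + "%"
            + ", reduce " + (int) (status.reduceProgress() * 100) + "%");
      }
    } finally {
      client.close();
    }
  }
}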
Code example from: apache/hive
public List<JobItemBean> run(String user, boolean showall, String jobId,
int numRecords, boolean showDetails)
throws NotAuthorizedException, BadParam, IOException, InterruptedException {
UserGroupInformation ugi = null;
WebHCatJTShim tracker = null;
ArrayList<String> ids = new ArrayList<String>();
try {
ugi = UgiFactory.getUgi(user);
tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
JobStatus[] jobs = tracker.getAllJobs();
if (jobs != null) {
for (JobStatus job : jobs) {
String id = job.getJobID().toString();
if (showall || user.equals(job.getUsername()))
ids.add(id);
}
}
} catch (IllegalStateException e) {
throw new BadParam(e.getMessage());
} finally {
if (tracker != null)
tracker.close();
if (ugi != null)
FileSystem.closeAllForUGI(ugi);
}
return getJobStatus(ids, user, showall, jobId, numRecords, showDetails);
}
Code example from: org.apache.hadoop/hadoop-mapreduce-client-core
private static JobStatus createTestJobStatus(String jobId, int state) {
return new JobStatus(
JobID.forName(jobId), 0.5f, 0.0f,
state, "root", "TestJobEndNotifier", null, null);
}
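Judging from this call and the constructor calls in the later examples, the eight-argument constructor takes the job ID, map progress, reduce progress, run state, user, job name, job file, and tracking URL, while the four-argument form supplies only the ID, the two progress values, and the run state.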
Code example from: apache/hive
private void logJob(String logDir, String jobID, PrintWriter listWriter)
throws IOException {
RunningJob rj = jobClient.getJob(JobID.forName(jobID));
String jobURLString = rj.getTrackingURL();
fs.mkdirs(jobDir);
listWriter.println("job: " + jobID + "(" + "name=" + rj.getJobName() + ","
+ "status=" + JobStatus.getJobRunState(rj.getJobState()) + ")");
Code example from: apache/hive
/**
* Grab a handle to a job that is already known to the JobTracker.
*
* @return Profile of the job, or null if not found.
*/
public JobProfile getJobProfile(JobID jobid)
throws IOException {
RunningJob rj = getJob(jobid);
if(rj == null) {
return null;
}
JobStatus jobStatus = rj.getJobStatus();
return new JobProfile(jobStatus.getUsername(), jobStatus.getJobID(),
jobStatus.getJobFile(), jobStatus.getTrackingUrl(), jobStatus.getJobName());
}
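Note that a JobStatus carries the user name, job ID, job file, tracking URL, and job name, which is exactly enough to rebuild a JobProfile when only the status object is at hand.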
Code example from: apache/hive
org.apache.hadoop.mapred.JobID.forName(childJobIdString);
LOG.info(String.format("Reconnecting to an existing job %s", childJobIdString));
if (jobStatus.isJobComplete()) {
LOG.info(String.format("Child job %s completed", childJobIdString));
int exitCode = 0;
if (jobStatus.getRunState() != org.apache.hadoop.mapred.JobStatus.SUCCEEDED) {
exitCode = 1;
jobStatus.mapProgress()*100, jobStatus.reduceProgress()*100);
updateJobStatePercentAndChildId(conf, context.getJobID().toString(), percent, null);
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-common
this.systemJobFile = new Path(systemJobDir, "job.xml");
this.id = jobid;
JobConf conf = new JobConf(systemJobFile);
this.localFs = FileSystem.getLocal(conf);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
this.localJobDir = localFs.makeQualified(new Path(
new Path(conf.getLocalPath(jobDir), user), jobid.toString()));
this.localJobFile = new Path(this.localJobDir, id + ".xml");
OutputStream out = localFs.create(localJobFile);
try {
conf.writeXml(out);
} finally {
out.close();
profile = new JobProfile(job.getUser(), id, systemJobFile.toString(),
"http://localhost:8080/", job.getJobName());
status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING,
profile.getUser(), profile.getJobName(), profile.getJobFile(),
profile.getURL().toString());
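The pattern here, building a JobProfile and a matching JobStatus in the RUNNING state at submission time, recurs in the local-runner constructors later in this article.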
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
JobClient client = new JobClient(mr.getConfig());
assertFalse(runningJob.isRetired());
assertEquals(runningJob.getFailureInfo(), "");
assertEquals(runningJob.getJobStatus().getJobName(), "N/A");
assertEquals(status.getActiveTrackerNames().size(), 2);
assertEquals(status.getBlacklistedTrackers(), 0);
assertEquals(status.getBlacklistedTrackerNames().size(), 0);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith(
"/job.xml"));
.getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus()
.getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
} finally {
if (fileSys != null) {
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
RunningJob rj = client.submitJob(job);
JobID jobId = rj.getID();
assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId)
.getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus()
.getStartTime(), client.getJob(jobId).getJobStatus()
.getStartTime());
} finally {
if (fileSys != null) {
Code example from: org.apache.hadoop/hadoop-mapred-test
public void testCurrentJHParser() throws Exception {
final Configuration conf = new Configuration();
final FileSystem lfs = FileSystem.getLocal(conf);
lfs.getUri(), lfs.getWorkingDirectory());
conf.setInt(TTConfig.TT_REDUCE_SLOTS, 1);
MiniMRCluster mrCluster = new MiniMRCluster(1, "file:///", 1, null, null,
new JobConf(conf));
JobClient jc = new JobClient(jConf);
String user = jc.getAllJobs()[0].getUsername();
Code example from: com.facebook.hadoop/hadoop-core
JobID jobId = job.getStatus().getJobID();
Path jobStatusFile = getInfoFilePath(jobId);
try {
FSDataOutputStream dataOut = fs.create(jobStatusFile);
job.getStatus().write(dataOut);
ex.getMessage(), ex);
try {
fs.delete(jobStatusFile, true);
Code example from: org.apache.hadoop/hadoop-mapred-test
clock.advance(600);
TaskAttemptID[] tid = new TaskAttemptID[2];
JobConf conf = new JobConf();
conf.setNumMapTasks(1);
conf.setNumReduceTasks(0);
conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1");
conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false");
JobStatus.SUCCEEDED, job.getStatus().getRunState());
1, jobTracker.getClusterStatus(false).getBlacklistedTrackers());
jobTracker.getClusterStatus(false).getActiveTrackerNames()
.contains(trackers[0]));
0, jobTracker.getClusterStatus(false).getBlacklistedTrackers());
Code example from: org.jvnet.hudson.hadoop/hadoop-core
+ jobtracker.getInfoPort() + "/jobdetails.jsp?jobid=" + jobid;
this.jobtracker = jobtracker;
this.status = new JobStatus(jobid, 0.0f, 0.0f, JobStatus.PREP);
this.startTime = System.currentTimeMillis();
status.setStartTime(startTime);
this.localFs = FileSystem.getLocal(default_conf);
JobConf default_job_conf = new JobConf(default_conf);
this.localJobFile = default_job_conf.getLocalPath(JobTracker.SUBDIR
+"/"+jobid + ".xml");
this.localJarFile = default_job_conf.getLocalPath(JobTracker.SUBDIR
+"/"+ jobid + ".jar");
Path sysDir = new Path(this.jobtracker.getSystemDir());
FileSystem fs = sysDir.getFileSystem(default_conf);
jobFile = new Path(sysDir, jobid + "/job.xml");
fs.copyToLocalFile(jobFile, localJobFile);
conf = new JobConf(localJobFile);
this.priority = conf.getJobPriority();
this.status.setJobPriority(this.priority);
this.profile = new JobProfile(conf.getUser(), jobid,
jobFile.toString(), url, conf.getJobName(),
String jarFile = conf.getJar();
if (jarFile != null) {
fs.copyToLocalFile(new Path(jarFile), localJarFile);
conf.setJar(localJarFile.toString());
Code example from: com.facebook.hadoop/hadoop-core
public Job(JobID jobid, JobConf conf) throws IOException {
this.doSequential =
conf.getBoolean("mapred.localrunner.sequential", true);
this.id = jobid;
this.mapoutputFile = new MapOutputFile(jobid);
this.mapoutputFile.setConf(conf);
this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml");
this.localFs = FileSystem.getLocal(conf);
persistConf(this.localFs, this.localFile, conf);
this.job = new JobConf(localFile);
profile = new JobProfile(job.getUser(), id, localFile.toString(),
"http://localhost:8080/", job.getJobName());
status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING);
jobs.put(id, this);
numSlots = conf.getInt(LOCAL_RUNNER_SLOTS, DEFAULT_LOCAL_RUNNER_SLOTS);
executor = Executors.newFixedThreadPool(numSlots);
int handlerCount = numSlots;
umbilicalServer =
RPC.getServer(this, LOCALHOST, 0, handlerCount, false, conf);
umbilicalServer.start();
umbilicalPort = umbilicalServer.getListenerAddress().getPort();
this.start();
}
Code example from: org.jvnet.hudson.hadoop/hadoop-core
public Job(JobID jobid, JobConf conf) throws IOException {
this.file = new Path(getSystemDir(), jobid + "/job.xml");
this.id = jobid;
this.mapoutputFile = new MapOutputFile(jobid);
this.mapoutputFile.setConf(conf);
this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml");
this.localFs = FileSystem.getLocal(conf);
fs.copyToLocalFile(file, localFile);
this.job = new JobConf(localFile);
profile = new JobProfile(job.getUser(), id, file.toString(),
"http://localhost:8080/", job.getJobName());
status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING);
jobs.put(id, this);
this.start();
}
Code example from: com.github.jiayuhan-it/hadoop-mapreduce-client-core
/**
* We store a JobProfile and a timestamp for when we last
* acquired the job profile. If the job is null, then we cannot
* perform any of the tasks. The job might be null if the cluster
* has completely forgotten about the job. (e.g., 24 hours after the
* job completes.)
*/
public NetworkedJob(JobStatus status, Cluster cluster) throws IOException {
this(status, cluster, new JobConf(status.getJobFile()));
}
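As the Javadoc warns, the JobStatus can outlive the cluster's memory of the job, so callers of NetworkedJob should expect lookups through it to fail once the cluster has forgotten the underlying job.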
Code example from: org.apache.hadoop/hadoop-mapred-test
JobConf conf = new JobConf(miniMRCluster.createJobConf());
if (doPhysicalMemory) {
conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
} else {
conf.setMemoryForMapTask(PER_TASK_LIMIT);
JobClient jClient = new JobClient(conf);
JobStatus[] jStatus = jClient.getAllJobs();
JobStatus js = jStatus[0]; // Our only job
RunningJob rj = jClient.getJob(js.getJobID());
TaskCompletionEvent[] taskComplEvents = rj.getTaskCompletionEvents(0);
rj.getTaskDiagnostics(tce.getTaskAttemptId());
Code example from: cdapio/cdap
/**
* @param runId for which information will be returned.
* @return a {@link MRJobInfo} containing information about a particular MapReduce program run.
* @throws IOException if there is failure to communicate through the JobClient.
* @throws NotFoundException if a Job with the given runId is not found.
*/
@Override
public MRJobInfo getMRJobInfo(Id.Run runId) throws IOException, NotFoundException {
Preconditions.checkArgument(ProgramType.MAPREDUCE.equals(runId.getProgram().getType()));
JobClient jobClient = new JobClient(hConf);
JobStatus[] jobs = jobClient.getAllJobs();
JobStatus thisJob = findJobForRunId(jobs, runId.toEntityId());
RunningJob runningJob = jobClient.getJob(thisJob.getJobID());
if (runningJob == null) {
throw new IllegalStateException(String.format("JobClient returned null for RunId: '%s', JobId: '%s'",
runId, thisJob.getJobID()));
}
Counters counters = runningJob.getCounters();
TaskReport[] mapTaskReports = jobClient.getMapTaskReports(thisJob.getJobID());
TaskReport[] reduceTaskReports = jobClient.getReduceTaskReports(thisJob.getJobID());
return new MRJobInfo(runningJob.mapProgress(), runningJob.reduceProgress(),
groupToMap(counters.getGroup(TaskCounter.class.getName())),
toMRTaskInfos(mapTaskReports), toMRTaskInfos(reduceTaskReports), true);
}
Code example from: org.apache.hadoop/hadoop-mapred-test
@SuppressWarnings("deprecation")
public void testKillCompletedJob() throws IOException, InterruptedException {
job = new MyFakeJobInProgress(new JobConf(), jobTracker);
jobTracker.addJob(job.getJobID(), (JobInProgress)job);
job.status.setRunState(JobStatus.SUCCEEDED);
jobTracker.killJob(job.getJobID());
assertTrue("Run state changed when killing completed job" ,
job.status.getRunState() == JobStatus.SUCCEEDED);
}
Code example from: LiveRamp/cascading_ext
JobClient jobClient = new JobClient(new InetSocketAddress(jobTracker, port), new Configuration());
for(JobStatus status: jobClient.getAllJobs()){
if(status.getRunState() == JobStatus.RUNNING){
RunningJob job = jobClient.getJob(status.getJobID());
if(job.getJobName().contains(jobsToTarget) || job.getID().toString().contains(jobsToTarget)){
JobID jobid = status.getJobID();
RunningJob runningJob = jobClient.getJob(entry.getKey().getJobID());
runningJob.killTask(entry.getKey(), false);