本文整理了Java中org.apache.hadoop.mapred.JobConf.getPartitionerClass()
方法的一些代码示例,展示了JobConf.getPartitionerClass()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。JobConf.getPartitionerClass()
方法的具体详情如下:
包路径:org.apache.hadoop.mapred.JobConf
类名称:JobConf
方法名:getPartitionerClass
[英]Get the Partitioner used to partition Mapper-outputs to be sent to the Reducers.
[中]获取用于对映射器输出进行分区以发送到还原器的分区器。
代码示例来源:origin: apache/ignite
/** {@inheritDoc} */
@Override public HadoopPartitioner partitioner() throws IgniteCheckedException {
    // Fix: the scraped snippet had the invalid type "Class>"; the lookup needs a
    // wildcard Class<?> since the configured class is unknown at compile time.
    // An explicitly configured old-API (mapred) partitioner takes precedence.
    Class<?> partClsOld = jobConf().getClass("mapred.partitioner.class", null);

    if (partClsOld != null)
        return new HadoopV1Partitioner(jobConf().getPartitionerClass(), jobConf());

    try {
        // Otherwise fall back to the new-API (mapreduce) partitioner from the job context.
        return new HadoopV2Partitioner(jobCtx.getPartitionerClass(), jobConf());
    }
    catch (ClassNotFoundException e) {
        // Preserve the cause so callers can see which class failed to load.
        throw new IgniteCheckedException(e);
    }
}
代码示例来源:origin: apache/hbase
/**
 * Makes every jar the tasks need at runtime available on the job's classpath:
 * the HBase dependency jars plus the jars containing the job's configured
 * key/value, partitioner, input/output format and combiner classes.
 *
 * @param job the mapred job configuration to update
 * @throws IOException if the dependency jars cannot be added to the job
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  // Old-API input/output formats fall back to the text formats when unset.
  Class<?> inputFormatClass =
      job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class);
  Class<?> outputFormatClass =
      job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(
      job,
      job.getMapOutputKeyClass(),
      job.getMapOutputValueClass(),
      job.getOutputKeyClass(),
      job.getOutputValueClass(),
      job.getPartitionerClass(),
      inputFormatClass,
      outputFormatClass,
      job.getCombinerClass());
}
}
代码示例来源:origin: apache/hive
jc.getPartitionerClass(), null);
代码示例来源:origin: apache/drill
jc.getPartitionerClass(), null);
代码示例来源:origin: Impetus/jumbune
/**
 * Instantiates the old-API (mapred) Partitioner configured on the job and
 * stores it in {@code OLD_PARTITIONER_THREAD_LOCAL} for later use when
 * partitioning map output.
 *
 * <p>Fixes over the scraped original: the identifiers were garbled
 * ({@code paritiOnerClass} / {@code paritionerClass} / {@code oldPartitiOner} /
 * {@code oldPartitioner} did not match, so the code could not compile), the
 * javadoc documented a nonexistent {@code context} parameter and a return
 * value for a {@code void} method, and the two identical catch blocks are
 * collapsed into a multi-catch.
 *
 * @param conf job configuration from which the partitioner class is read
 */
@SuppressWarnings(RAW_TYPES)
public static void setPartitioner(JobConf conf) {
    org.apache.hadoop.mapred.Partitioner oldPartitioner = null;
    Class partitionerClass = null;
    try {
        partitionerClass = conf.getPartitionerClass();
        oldPartitioner =
                (org.apache.hadoop.mapred.Partitioner) partitionerClass.newInstance();
        OLD_PARTITIONER_THREAD_LOCAL.set(oldPartitioner);
    } catch (InstantiationException | IllegalAccessException e) {
        // Best-effort: log and continue without a partitioner rather than abort.
        LOGGER.error(ErrorCodesAndMessages.MESSAGE_PARTITIONER_NOT_SET
                + " : " + e);
    }
}
代码示例来源:origin: org.apache.hadoop/hadoop-mapred
/**
 * Wires up the partitioner for old-API map output collection.
 * Reconstructed from the garbled scrape: the generic parameters and the
 * constructor parameter list were stripped by extraction — restored to the
 * canonical Hadoop MapTask.OldOutputCollector shape (TODO confirm against
 * the exact upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 1) {
    // Multiple reducers: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // 0 or 1 reducers: trivial partitioner sending everything to the
    // single partition (numPartitions - 1).
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return numPartitions - 1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
/**
 * Wires up the partitioner for old-API map output collection.
 * Reconstructed from the garbled scrape: generics and the constructor
 * parameter list were stripped by extraction — restored to the canonical
 * Hadoop MapTask.OldOutputCollector shape (TODO confirm against the exact
 * upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 1) {
    // Multiple reducers: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // 0 or 1 reducers: trivial partitioner sending everything to the
    // single partition (numPartitions - 1).
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return numPartitions - 1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: io.hops/hadoop-mapreduce-client-core
/**
 * Wires up the partitioner for old-API map output collection.
 * Reconstructed from the garbled scrape: generics and the constructor
 * parameter list were stripped by extraction — restored to the canonical
 * Hadoop MapTask.OldOutputCollector shape (TODO confirm against the exact
 * upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 1) {
    // Multiple reducers: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // 0 or 1 reducers: trivial partitioner sending everything to the
    // single partition (numPartitions - 1).
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return numPartitions - 1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Wires up the partitioner for old-API map output collection
 * (facebook/hadoop-core variant: guards on {@code numPartitions > 0} and the
 * fallback partitioner returns -1, unlike the mainline which uses
 * {@code > 1} / {@code numPartitions - 1}).
 * Reconstructed from the garbled scrape: generics and the constructor
 * parameter list were stripped by extraction (TODO confirm against the exact
 * upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 0) {
    // At least one reducer: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // No reducers: sentinel partitioner that always reports -1.
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return -1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-core
/**
 * Wires up the partitioner for old-API map output collection.
 * Reconstructed from the garbled scrape: generics and the constructor
 * parameter list were stripped by extraction — restored to the canonical
 * Hadoop MapTask.OldOutputCollector shape (TODO confirm against the exact
 * upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 1) {
    // Multiple reducers: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // 0 or 1 reducers: trivial partitioner sending everything to the
    // single partition (numPartitions - 1).
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return numPartitions - 1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-core
/**
 * Wires up the partitioner for old-API map output collection.
 * Reconstructed from the garbled scrape: generics and the constructor
 * parameter list were stripped by extraction — restored to the canonical
 * Hadoop MapTask.OldOutputCollector shape (TODO confirm against the exact
 * upstream revision).
 */
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K, V> collector, JobConf conf) {
  numPartitions = conf.getNumReduceTasks();
  if (numPartitions > 1) {
    // Multiple reducers: instantiate the user-configured Partitioner.
    partitioner = (Partitioner<K, V>)
      ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
  } else {
    // 0 or 1 reducers: trivial partitioner sending everything to the
    // single partition (numPartitions - 1).
    partitioner = new Partitioner<K, V>() {
      @Override
      public void configure(JobConf job) { }
      @Override
      public int getPartition(K key, V value, int numPartitions) {
        return numPartitions - 1;
      }
    };
  }
  this.collector = collector;
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
conf.setMapRunnerClass(PipesMapRunner.class);
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
代码示例来源:origin: io.hops/hadoop-mapreduce-client-core
conf.setMapRunnerClass(PipesMapRunner.class);
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
代码示例来源:origin: com.github.jiayuhan-it/hadoop-mapreduce-client-core
conf.setMapRunnerClass(PipesMapRunner.class);
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
代码示例来源:origin: co.cask.hbase/hbase
/**
 * Adds to the job's classpath the jars that tasks need at runtime:
 * ZooKeeper, Guava, protobuf, and the jars containing the job's configured
 * key/value, partitioner, input/output format and combiner classes.
 *
 * @param job the mapred job configuration whose dependency jars are added
 * @throws IOException if a dependency jar cannot be added to the job
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
job,
org.apache.zookeeper.ZooKeeper.class,
com.google.common.base.Function.class,
com.google.protobuf.Message.class,
job.getMapOutputKeyClass(),
job.getMapOutputValueClass(),
job.getOutputKeyClass(),
job.getOutputValueClass(),
job.getPartitionerClass(),
// Old-API input/output formats default to the text formats when unset.
job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
job.getCombinerClass());
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-mapred
conf.setMapRunnerClass(PipesMapRunner.class);
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core
conf.setMapRunnerClass(PipesMapRunner.class);
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
代码示例来源:origin: com.aliyun.hbase/alihbase-mapreduce
/**
 * Adds to the job's classpath the HBase dependency jars plus the jars
 * containing the job's configured key/value, partitioner, input/output
 * format and combiner classes.
 *
 * @param job the mapred job configuration whose dependency jars are added
 * @throws IOException if a dependency jar cannot be added to the job
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(
job,
job.getMapOutputKeyClass(),
job.getMapOutputValueClass(),
job.getOutputKeyClass(),
job.getOutputValueClass(),
job.getPartitionerClass(),
// Old-API input/output formats default to the text formats when unset.
job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
job.getCombinerClass());
}
}
代码示例来源:origin: org.apache.hbase/hbase-mapreduce
/**
 * Adds to the job's classpath the HBase dependency jars plus the jars
 * containing the job's configured key/value, partitioner, input/output
 * format and combiner classes.
 *
 * @param job the mapred job configuration whose dependency jars are added
 * @throws IOException if a dependency jar cannot be added to the job
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(
job,
job.getMapOutputKeyClass(),
job.getMapOutputValueClass(),
job.getOutputKeyClass(),
job.getOutputValueClass(),
job.getPartitionerClass(),
// Old-API input/output formats default to the text formats when unset.
job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
job.getCombinerClass());
}
}
代码示例来源:origin: harbby/presto-connectors
/**
 * Adds to the job's classpath the HBase dependency jars plus the jars
 * containing the job's configured key/value, partitioner, input/output
 * format and combiner classes.
 *
 * @param job the mapred job configuration whose dependency jars are added
 * @throws IOException if a dependency jar cannot be added to the job
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
job,
// when making changes here, consider also mapreduce.TableMapReduceUtil
// pull job classes
job.getMapOutputKeyClass(),
job.getMapOutputValueClass(),
job.getOutputKeyClass(),
job.getOutputValueClass(),
job.getPartitionerClass(),
// Old-API input/output formats default to the text formats when unset.
job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
job.getCombinerClass());
}
}