public static class Map extends Mapper<LongWritable, Text, Text, Text> {
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        try {
            String[] lineSplit = line.split("\t");
            // emit the first column as a key, and again under a shared "uniq" key
            context.write(new Text(lineSplit[0]), new Text(""));
            context.write(new Text("uniq"), new Text(lineSplit[0]));
        } catch (ArrayIndexOutOfBoundsException e) {
            // Counter is a user-defined enum, e.g. enum Counter { LINESKIP }
            context.getCounter(Counter.LINESKIP).increment(1);
            return;
        }
    }
}
public static class Reduce extends Reducer<Text, Text, Text, Text> {
    private Set<String> count = new HashSet<String>();
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            count.add(value.toString());
        }
        // the whole distinct set lives in reducer memory; this is what overflows on big inputs
        context.write(new Text("uniq"), new Text(count.size() + ""));
    }
}
------------------------- This problem has been bugging me for two weeks, and learning material on this topic is scarce. My map and reduce are written as above, but as soon as the data volume grows a bit, the job runs out of memory, so I suspect my whole approach is wrong.
You said "you must use the keys after reduce-side grouping to get the deduplication effect". How exactly should that map and reduce be written?
map: emit the first column as the key; the value doesn't matter
initialize a counter as a field of the reduce class
increment that counter once in each reduce() call
commit the counter in the reducer's cleanup() method; a complete job along these lines follows
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class wzl189_distinct {
public static class MyMapper extends
        Mapper<Object, Text, Text, NullWritable> {
    Text outKey = new Text();

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] tmp = value.toString().split(" ");
        if (tmp.length != 2)
            return; // skip malformed lines
        // the first column is the dedup key; NullWritable keeps the shuffle payload minimal
        outKey.set(tmp[0]);
        context.write(outKey, NullWritable.get());
    }
}
}
public static class MyReducer extends
        Reducer<Text, NullWritable, LongWritable, NullWritable> {
    long myCount = 0L;

    @Override
    public void reduce(Text key, Iterable<NullWritable> values,
            Context context) throws IOException, InterruptedException {
        // called once per distinct key, so counting calls counts distinct keys
        ++myCount;
    }

    @Override
    public void cleanup(Context context) throws IOException,
            InterruptedException {
        // emit the total once, after the last key has been processed
        context.write(new LongWritable(myCount), NullWritable.get());
    }
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
if (args.length != 2) {
    System.err.println("Usage: wzl189_distinct <in> <out>");
    System.exit(2);
}
conf.set("mapred.child.java.opts", "-Xms350m -Xmx1024m");
@SuppressWarnings("deprecation")
Job job = new Job(conf, "wzl189_distinct");
// a single reducer so the count is global
job.setNumReduceTasks(1);
job.setInputFormatClass(TextInputFormat.class);
job.setJarByClass(wzl189_distinct.class);
job.setMapperClass(MyMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(NullWritable.class);
job.setReducerClass(MyReducer.class);
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
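For reference, a typical way to run the job once it is packaged (the jar name and HDFS paths below are illustrative, not from the thread):

hadoop jar distinct.jar wzl189_distinct /user/hadoop/input /user/hadoop/output

With a single reducer, the output directory holds one part file containing one line: the distinct count.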
In the reduce phase, a single counter is all you need.
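One further step if shuffle volume itself becomes a problem at scale: a combiner can collapse duplicate keys on the map side before they cross the network, and since deduplication is idempotent this is safe here. A minimal sketch (the class name MyCombiner and the wiring line are my additions, not from the thread):

public static class MyCombiner extends
        Reducer<Text, NullWritable, Text, NullWritable> {
    @Override
    public void reduce(Text key, Iterable<NullWritable> values,
            Context context) throws IOException, InterruptedException {
        // forward each key exactly once per combine pass; local duplicates collapse
        context.write(key, NullWritable.get());
    }
}

Wire it up in main() with: job.setCombinerClass(MyCombiner.class);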