The code is as follows:
First mapper: FindFriendMapTaskByOne
package com.gec.demo;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FindFriendMapTaskByOne extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // each input line looks like: user:friend1,friend2,...
        String line = value.toString();
        String[] datas = line.split(":");
        String user = datas[0];
        String[] friends = datas[1].split(",");
        // invert the relation: emit (friend, user) so stage one can group by friend
        for (String friend : friends) {
            context.write(new Text(friend), new Text(user));
        }
    }
}
First reducer: FindFriendReducerTaskByOne
package com.gec.demo;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FindFriendReducerTaskByOne extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // concatenate every user who lists this friend, joined with '-'
        StringBuffer strBuf = new StringBuffer();
        for (Text value : values) {
            strBuf.append(value).append("-");
        }
        context.write(key, new Text(strBuf.toString()));
    }
}
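To make stage one concrete, consider a hypothetical input (the user:friend-list format the mapper above assumes):

A:B,C,D
E:B,C
F:B,D

The mapper inverts the relation and emits pairs such as (B, A), (B, E), (B, F); after the shuffle, the reducer produces one line per friend in the form friend<TAB>user1-user2-...-, for example:

B	A-E-F-
C	A-E-
D	A-F-

The trailing '-' left by the append loop is harmless: Java's split discards trailing empty strings, so the second stage never sees an empty user name.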
First job: FindFriendJobByOne
package com.gec.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FindFriendJobByOne {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // set the driver class
        job.setJarByClass(FindFriendJobByOne.class);
        // set which map task to run
        job.setMapperClass(FindFriendMapTaskByOne.class);
        // set which reducer task to run
        job.setReducerClass(FindFriendReducerTaskByOne.class);
        // set the map task's output key type
        job.setMapOutputKeyClass(Text.class);
        // set the map task's output value type
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // where the input data lives
        FileInputFormat.setInputPaths(job, "D://Bigdata//4、mapreduce//day05//homework//friendhomework.txt");
        // where the results are written
        FileOutputFormat.setOutputPath(job, new Path("D://Bigdata//4、mapreduce//day05//homework//output"));
        // submit this job to the YARN cluster
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
Result:
Second mapper: FindFriendMapTaskByTwo
package com.gec.demo;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Arrays;

public class FindFriendMapTaskByTwo extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // each stage-one line looks like: friend<TAB>user1-user2-...
        String line = value.toString();
        String[] datas = line.split("\t");
        String[] userlist = datas[1].split("-");
        // sort so each pair is always emitted in the same order (avoids both A-B and B-A keys)
        Arrays.sort(userlist);
        // emit every pair of users who share this friend
        for (int i = 0; i < userlist.length; i++) {
            for (int j = i + 1; j < userlist.length; j++) {
                String user1 = userlist[i];
                String user2 = userlist[j];
                String friendkey = user1 + "-" + user2;
                context.write(new Text(friendkey), new Text(datas[0]));
            }
        }
    }
}
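Continuing the hypothetical example from stage one: given the line B<TAB>A-E-F-, userlist becomes [A, E, F] (split drops the trailing empty string), and the mapper emits (A-E, B), (A-F, B), (E-F, B). After the shuffle, each user pair arrives at the reducer together with every friend the two users have in common.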
Second reducer: FindFriendReducerTaskByTwo
package com.gec.demo;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FindFriendReducerTaskByTwo extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // concatenate all common friends of this user pair
        StringBuffer stringBuffer = new StringBuffer();
        for (Text value : values) {
            stringBuffer.append(value).append(",");
        }
        context.write(key, new Text(stringBuffer.toString()));
    }
}
Second job: FindFriendJobByTwo
package com.gec.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FindFriendJobByTwo {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // set the driver class
        job.setJarByClass(FindFriendJobByTwo.class);
        // set which map task to run
        job.setMapperClass(FindFriendMapTaskByTwo.class);
        // set which reducer task to run
        job.setReducerClass(FindFriendReducerTaskByTwo.class);
        // set the map task's output key type
        job.setMapOutputKeyClass(Text.class);
        // set the map task's output value type
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // where the input data lives (the saved output of the first job)
        FileInputFormat.setInputPaths(job, "D://Bigdata//4、mapreduce//day05//homework//friendhomework3.txt");
        // where the results are written
        FileOutputFormat.setOutputPath(job, new Path("D://Bigdata//4、mapreduce//day05//homework//output"));
        // submit this job to the YARN cluster
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
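Here the first job's output is saved by hand as friendhomework3.txt before the second job runs. A minimal sketch of chaining the two stages in one driver instead (the class name and directory paths below are illustrative assumptions):

package com.gec.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FindFriendChainDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // illustrative paths; adjust to your environment
        Path input = new Path("D://Bigdata//friendhomework.txt");
        Path stageOneOut = new Path("D://Bigdata//friend-stage1-out");
        Path stageTwoOut = new Path("D://Bigdata//friend-stage2-out");

        Job job1 = Job.getInstance(conf, "common-friends-stage1");
        job1.setJarByClass(FindFriendJobByOne.class);
        job1.setMapperClass(FindFriendMapTaskByOne.class);
        job1.setReducerClass(FindFriendReducerTaskByOne.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(Text.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job1, input);
        FileOutputFormat.setOutputPath(job1, stageOneOut);

        // run stage two only if stage one succeeded
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }

        Job job2 = Job.getInstance(conf, "common-friends-stage2");
        job2.setJarByClass(FindFriendJobByTwo.class);
        job2.setMapperClass(FindFriendMapTaskByTwo.class);
        job2.setReducerClass(FindFriendReducerTaskByTwo.class);
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(Text.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        // stage-one output feeds stage two directly
        FileInputFormat.setInputPaths(job2, stageOneOut);
        FileOutputFormat.setOutputPath(job2, stageTwoOut);

        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}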
Result:
1) Requirement:
Order table t_order:
id | pid | amount |
1001 | 01 | 1 |
1002 | 02 | 2 |
1003 | 03 | 3 |
Product table t_product:
id | pname |
01 | 小米 |
02 | 华为 |
03 | 格力 |
Merge the data from the product table into the order table according to product id.
Final data form:
id | pname | amount |
1001 | 小米 | 1 |
1001 | 小米 | 1 |
1002 | 华为 | 2 |
1002 | 华为 | 2 |
1003 | 格力 | 3 |
1003 | 格力 | 3 |
By using the join condition (the product id) as the map output key, records from both tables that satisfy the join condition are sent, tagged with the file they came from, to the same reduce task, where the rows are stitched together.
1) Create the bean class for the merged order and product record
package com.gec.mapreduce.table;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public class TableBean implements Writable {
    private String order_id; // order id
    private String p_id;     // product id
    private int amount;      // product quantity
    private String pname;    // product name
    private String flag;     // table tag: "0" = order, "1" = product

    public TableBean() {
        super();
    }

    public TableBean(String order_id, String p_id, int amount, String pname, String flag) {
        super();
        this.order_id = order_id;
        this.p_id = p_id;
        this.amount = amount;
        this.pname = pname;
        this.flag = flag;
    }

    public String getFlag() { return flag; }
    public void setFlag(String flag) { this.flag = flag; }
    public String getOrder_id() { return order_id; }
    public void setOrder_id(String order_id) { this.order_id = order_id; }
    public String getP_id() { return p_id; }
    public void setP_id(String p_id) { this.p_id = p_id; }
    public int getAmount() { return amount; }
    public void setAmount(int amount) { this.amount = amount; }
    public String getPname() { return pname; }
    public void setPname(String pname) { this.pname = pname; }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(order_id);
        out.writeUTF(p_id);
        out.writeInt(amount);
        out.writeUTF(pname);
        out.writeUTF(flag);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.order_id = in.readUTF();
        this.p_id = in.readUTF();
        this.amount = in.readInt();
        this.pname = in.readUTF();
        this.flag = in.readUTF();
    }

    @Override
    public String toString() {
        // the reducer stores pname in the p_id slot before writing,
        // so this prints: order_id <TAB> pname <TAB> amount
        return order_id + "\t" + p_id + "\t" + amount + "\t";
    }
}
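As a quick sanity check outside the cluster, the bean can be round-tripped through its own Writable methods. This is a hypothetical test harness, not part of the original code:

package com.gec.mapreduce.table;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TableBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        TableBean in = new TableBean("1001", "01", 1, "", "0");

        // serialize through the Writable interface
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bytes));

        // deserialize into a fresh bean
        TableBean out = new TableBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(out); // prints the tab-separated fields via toString()
    }
}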
2) Write the TableMapper program
package com.gec.mapreduce.table;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {

    TableBean bean = new TableBean();
    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        // 1 determine which input file this record comes from
        FileSplit split = (FileSplit) context.getInputSplit();
        String name = split.getPath().getName();

        // 2 read the input line
        String line = value.toString();

        // 3 handle the two files differently
        if (name.startsWith("order")) { // order table
            // 3.1 split
            String[] fields = line.split(",");

            // 3.2 populate the bean
            bean.setOrder_id(fields[0]);
            bean.setP_id(fields[1]);
            bean.setAmount(Integer.parseInt(fields[2]));
            bean.setPname("");
            bean.setFlag("0");

            k.set(fields[1]);
        } else { // product table
            // 3.3 split
            String[] fields = line.split(",");

            // 3.4 populate the bean
            bean.setP_id(fields[0]);
            bean.setPname(fields[1]);
            bean.setFlag("1");
            bean.setAmount(0);
            bean.setOrder_id("");

            k.set(fields[0]);
        }
        // 4 emit, keyed by product id
        context.write(k, bean);
    }
}
3) Write the TableReducer program
package com.gec.mapreduce.table;

import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TableReducer extends Reducer<Text, TableBean, TableBean, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<TableBean> values, Context context)
            throws IOException, InterruptedException {

        // 1 collection holding the order rows for this product id
        ArrayList<TableBean> orderBeans = new ArrayList<>();
        // 2 bean holding the (single) product row
        TableBean pdBean = new TableBean();

        for (TableBean bean : values) {
            if ("0".equals(bean.getFlag())) { // order table
                // Hadoop reuses the value object, so copy each order row into the list
                TableBean orderBean = new TableBean();
                try {
                    BeanUtils.copyProperties(orderBean, bean);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                orderBeans.add(orderBean);
            } else { // product table
                try {
                    // keep the product row in memory for the join
                    BeanUtils.copyProperties(pdBean, bean);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        // 3 join: write the product name into each order (reusing the p_id slot)
        for (TableBean bean : orderBeans) {
            bean.setP_id(pdBean.getPname());

            // 4 emit the joined row
            context.write(bean, NullWritable.get());
        }
    }
}
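Tracing the reducer on the sample data: for key 01, the values iterable holds the order row (1001, 01, 1, flag 0) and the product row (01, 小米, flag 1). The order row is copied into orderBeans and the product row into pdBean; after the loop, 小米 is written into the order's p_id slot and the bean is emitted, printing 1001 小米 1 via toString().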
4) Write the TableDriver program
package com.gec.mapreduce.table;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TableDriver {

    public static void main(String[] args) throws Exception {
        // 1 get configuration information and a job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 set the local path of this program's jar
        job.setJarByClass(TableDriver.class);

        // 3 set the mapper/reducer classes this job uses
        job.setMapperClass(TableMapper.class);
        job.setReducerClass(TableReducer.class);

        // 4 set the mapper's output kv types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TableBean.class);

        // 5 set the final output kv types
        job.setOutputKeyClass(TableBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 6 set the job's input and output directories
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 submit the job, its configuration, and the jar containing its classes to YARN
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
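Assuming the classes are packed into a jar (the jar name and paths here are illustrative), the driver would be launched with the standard hadoop jar command:

hadoop jar table-join.jar com.gec.mapreduce.table.TableDriver /input/table /output/table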
5) Run the program and check the result
1001	小米	1
1001	小米	1
1002	华为	2
1002	华为	2
1003	格力	3
1003	格力	3
Drawback: with this approach the join is done in the reduce phase, so the reduce side carries almost all of the processing load while the map nodes do very little. Resource utilization is poor, and the reduce phase is highly prone to data skew.
Solution: perform the join on the map side.
1) Analysis
This is suitable when one of the tables being joined is small.
The small table can be distributed to every map node, so each map task can join the big-table records it reads locally and emit the final result directly. This greatly increases the parallelism of the join and speeds up processing; a minimal sketch follows below.
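To make this concrete, here is a minimal map-side join sketch. It is an illustration under stated assumptions, not the original code: it assumes the product file is distributed to every task as t_product.txt via job.addCacheFile, and that the driver sets job.setNumReduceTasks(0) since no reduce phase is needed.

package com.gec.mapreduce.table;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private final Map<String, String> pidToPname = new HashMap<>();
    private final Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException {
        // the cached file is symlinked into the task's working directory under its own name
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream("t_product.txt"), "UTF-8"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] fields = line.split(",");
                pidToPname.put(fields[0], fields[1]); // pid -> pname
            }
        }
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // each order line: id,pid,amount
        String[] fields = value.toString().split(",");
        String pname = pidToPname.getOrDefault(fields[1], "");
        k.set(fields[0] + "\t" + pname + "\t" + fields[2]);
        // the join is already complete here; emit the final row
        context.write(k, NullWritable.get());
    }
}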
2) Hands-on example
(1) First, in