Requirement:
Use a MapReduce program to implement a SQL-style join query.
Order table (order):
id | date | pid | amount |
1001 | 20150710 | P0001 | 2 |
1002 | 20150710 | P0001 | 3 |
1002 | 20150710 | P0002 | 3 |
1003 | 20150710 | P0003 | 4 |
Product table (product):
pid | pname | category_id | price |
P0001 | 小米6 | 1000 | 2499 |
P0002 | 锤子T3 | 1001 | 2500 |
P0003 | 三星S8 | 1002 | 6999 |
Suppose the data volume is huge and the two tables are stored as files in HDFS. We then need a MapReduce program to carry out the following SQL query:
select a.id, a.date, b.pname, b.category_id, b.price from t_order a join t_product b on a.pid = b.pid
Analysis:
Use the join condition (pid) as the map output key, so that the records from both tables that satisfy the join condition, each tagged with which file it came from, are sent to the same reduce task. The records are then stitched together in reduce.
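For example, for pid P0001 in the sample data, the map phase emits three pairs with the same key, two from order.txt and one from product.txt (empty fields shown as ""):

P0001 -> InfoBean(1001, 20150710, P0001, 2, "", "", "")    // from order.txt
P0001 -> InfoBean(1002, 20150710, P0001, 3, "", "", "")    // from order.txt
P0001 -> InfoBean("", "", P0001, "", 小米6, 1000, 2499)     // from product.txt

All three land in the same reduce call, where the product fields are copied onto each order record.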
Implementation:
First, convert the table data into the format we need:
order.txt:
1001,20150710,P0001,2
1002,20150710,P0001,3
1002,20150710,P0002,3
1003,20150710,P0003,4
product.txt:
P0001,小米6,1000,2499
P0002,锤子T3,1001,2500
P0003,三星S8,1002,6999
Then upload both files to the /join/srcdata directory in HDFS.
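A typical way to do this from the shell (assuming order.txt and product.txt are in the current local directory):

hadoop fs -mkdir -p /join/srcdata
hadoop fs -put order.txt product.txt /join/srcdata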
Because there are two file formats, the map phase has to check the file name and handle each kind of file differently. Likewise, in the reduce phase we have to distinguish the two kinds of records that arrive under the same key (pid); this is done by checking whether the order id is an empty string.
InfoBean.java:
package com.darrenchan.mr.bean;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

/**
 * id date pid amount pname category_id price
 *
 * @author chenchi
 */
public class InfoBean implements Writable {
    private String id;   // order id
    private String date;
    private String pid;  // product id
    private String amount;
    private String pname;
    private String category_id;
    private String price;

    public InfoBean() {
    }

    public InfoBean(String id, String date, String pid, String amount,
            String pname, String category_id, String price) {
        super();
        this.id = id;
        this.date = date;
        this.pid = pid;
        this.amount = amount;
        this.pname = pname;
        this.category_id = category_id;
        this.price = price;
    }

    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    public String getDate() { return date; }
    public void setDate(String date) { this.date = date; }

    public String getPid() { return pid; }
    public void setPid(String pid) { this.pid = pid; }

    public String getAmount() { return amount; }
    public void setAmount(String amount) { this.amount = amount; }

    public String getPname() { return pname; }
    public void setPname(String pname) { this.pname = pname; }

    public String getCategory_id() { return category_id; }
    public void setCategory_id(String category_id) { this.category_id = category_id; }

    public String getPrice() { return price; }
    public void setPrice(String price) { this.price = price; }

    @Override
    public String toString() {
        return "InfoBean [id=" + id + ", date=" + date + ", pid=" + pid
                + ", amount=" + amount + ", pname=" + pname
                + ", category_id=" + category_id + ", price=" + price + "]";
    }

    // Fields must be read back in exactly the order they were written:
    // id date pid amount pname category_id price
    @Override
    public void readFields(DataInput in) throws IOException {
        id = in.readUTF();
        date = in.readUTF();
        pid = in.readUTF();
        amount = in.readUTF();
        pname = in.readUTF();
        category_id = in.readUTF();
        price = in.readUTF();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(id);
        out.writeUTF(date);
        out.writeUTF(pid);
        out.writeUTF(amount);
        out.writeUTF(pname);
        out.writeUTF(category_id);
        out.writeUTF(price);
    }
}
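As a quick local sanity check (not part of the original post; the field values are just taken from the sample data above), the Writable round trip can be exercised like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import com.darrenchan.mr.bean.InfoBean;

public class InfoBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        // Serialize a bean the same way Hadoop would during the shuffle...
        InfoBean original = new InfoBean("1001", "20150710", "P0001", "2", "", "", "");
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // ...then deserialize into a fresh bean and compare.
        InfoBean restored = new InfoBean();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(restored); // should print the same field values as `original`
    }
}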
Join.java:
package com.darrenchan.mr.join;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.darrenchan.mr.bean.InfoBean;

public class Join {

    /**
     * Mapper class
     * @author chenchi
     */
    public static class JoinMapper extends Mapper<LongWritable, Text, Text, InfoBean> {
        // Create these objects once up front and only mutate their fields,
        // so the map method does not allocate a large number of InfoBean objects.
        InfoBean infoBean = new InfoBean();
        Text text = new Text(); // same reasoning as above

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // First, check the file name: is this record order data or product data?
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            String name = inputSplit.getPath().getName(); // file name
            String line = value.toString();
            String[] fields = line.split(",");
            if (name.startsWith("order")) { // order data
                String id = fields[0];
                String date = fields[1];
                String pid = fields[2];
                String amount = fields[3];
                infoBean.setId(id);
                infoBean.setDate(date);
                infoBean.setPid(pid);
                infoBean.setAmount(amount);
                // For order records, the three product-side fields are set to "".
                // They are set to "" rather than null because the bean is
                // serialized/deserialized and writeUTF cannot handle null.
                infoBean.setPname("");
                infoBean.setCategory_id("");
                infoBean.setPrice("");
                text.set(pid);
                context.write(text, infoBean);
            } else { // product data
                String pid = fields[0];
                String pname = fields[1];
                String category_id = fields[2];
                String price = fields[3];
                infoBean.setPid(pid);
                infoBean.setPname(pname);
                infoBean.setCategory_id(category_id);
                infoBean.setPrice(price);
                // For product records, the three order-side fields are set to "",
                // again because of serialization.
                infoBean.setId("");
                infoBean.setDate("");
                infoBean.setAmount("");
                text.set(pid);
                context.write(text, infoBean);
            }
        }
    }

    public static class JoinReducer extends Reducer<Text, InfoBean, InfoBean, NullWritable> {
        // A given pid can have many order records,
        // but only one product record.
        @Override
        protected void reduce(Text key, Iterable<InfoBean> values, Context context)
                throws IOException, InterruptedException {
            List<InfoBean> list = new ArrayList<InfoBean>(); // the order records
            InfoBean info = new InfoBean();                  // the single product record
            for (InfoBean infoBean : values) {
                if (!"".equals(infoBean.getId())) { // order record
                    InfoBean infoBean2 = new InfoBean();
                    try {
                        BeanUtils.copyProperties(infoBean2, infoBean);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                    list.add(infoBean2);
                } else { // product record
                    try {
                        BeanUtils.copyProperties(info, infoBean);
                    } catch (IllegalAccessException | InvocationTargetException e) {
                        e.printStackTrace();
                    }
                }
            }
            // Stitch the product fields onto each order record and emit.
            for (InfoBean infoBean : list) {
                infoBean.setPname(info.getPname());
                infoBean.setCategory_id(info.getCategory_id());
                infoBean.setPrice(info.getPrice());
                context.write(infoBean, NullWritable.get());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(Join.class);
        job.setMapperClass(JoinMapper.class);
        job.setReducerClass(JoinReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);
        job.setOutputKeyClass(InfoBean.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
Note: one thing to watch out for here. When iterating over the reduce method's Iterable<InfoBean> values, you must copy each element into a newly created object instead of just storing the reference, because Hadoop reuses a single object for every element of the iterator and keeps overwriting its contents.
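A minimal sketch of the pitfall, as it would appear inside the reduce loop above:

// WRONG: Hadoop reuses one InfoBean instance for every element of `values`,
// so every entry in the list would end up pointing at the same (last) record.
// list.add(infoBean);

// RIGHT: deep-copy the current state into a fresh bean before storing it.
InfoBean copy = new InfoBean();
BeanUtils.copyProperties(copy, infoBean); // copies all seven String fields
list.add(copy);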
Command to run the job (the jar name join.jar is illustrative; the main class and input path follow this post, and the output directory must not already exist): hadoop jar join.jar com.darrenchan.mr.join.Join /join/srcdata /join/output
Result:
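The original screenshot is not preserved here; based on the sample data and InfoBean's toString format, the output file (e.g. /join/output/part-r-00000) should contain the following, though the relative order of lines sharing a pid may vary:

InfoBean [id=1001, date=20150710, pid=P0001, amount=2, pname=小米6, category_id=1000, price=2499]
InfoBean [id=1002, date=20150710, pid=P0001, amount=3, pname=小米6, category_id=1000, price=2499]
InfoBean [id=1002, date=20150710, pid=P0002, amount=3, pname=锤子T3, category_id=1001, price=2500]
InfoBean [id=1003, date=20150710, pid=P0003, amount=4, pname=三星S8, category_id=1002, price=6999]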
Original post: http://www.cnblogs.com/DarrenChan/p/6754223.html