
Hadoop MapReduce: Small Examples of Data Deduplication and Data Sorting

Posted: 2014-08-19 19:12:02

Tags: hadoop, mapreduce, data deduplication

Data Deduplication:

Deduplication only requires that each distinct record appear once in the output. The MapReduce model does this almost for free: the mapper emits every input line as the key with an empty value, the framework groups identical keys together during the shuffle, and the reducer writes each key exactly once, ignoring the values entirely. The overall structure closely mirrors WordCount.
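For example, with a small hypothetical input file (the dates and letters are made-up sample data), the job collapses repeated lines:

Input (DedupIn):
	2012-3-1 a
	2012-3-2 b
	2012-3-1 a
	2012-3-3 c

Output (DedupOut):
	2012-3-1 a
	2012-3-2 b
	2012-3-3 c

Note that the output also comes out sorted, since the shuffle phase orders the Text keys lexicographically.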

Tip: the input and output paths are configured inside the program (see main below).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dedup {

	/**
	 * Mapper: emits each input line as the key, with an empty value.
	 */
	public static class Map extends Mapper<Object,Text,Text,Text>{
		private static Text line = new Text();
		// Map: emit the whole input line as the key; the value is left empty.
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException{
			line.set(value);
			context.write(line, new Text(""));
		}
	}
	public static class Reduce extends Reducer<Text,Text,Text,Text>{
		// Identical lines arrive grouped under one key; write the key once and discard the values.
		public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException{
			context.write(key, new Text(""));
		}
	}
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		// Initialize the configuration
		Configuration conf = new Configuration();
		
		/* Instead of taking the paths from the command line (the way the default
		 * args would be supplied via Eclipse's run-configuration arguments), they
		 * are hardcoded here. Both approaches behave the same; use whichever you prefer.
		 */
		// Input and output paths
		String[] ioArgs = new String[]{"hdfs://localhost:9000/home/xd/hadoop_tmp/DedupIn",
				"hdfs://localhost:9000/home/xd/hadoop_tmp/DedupOut"};
		
		String[] otherArgs = new GenericOptionsParser(conf,ioArgs).getRemainingArgs();
		
		if(otherArgs.length!=2){
			System.err.println("Usage:Data Deduplication <in> <out>");
			System.exit(2);
		}
		// Set up the job
		Job job = new Job(conf,"Dedup Job");
		job.setJarByClass(Dedup.class);
		
		// Classes handling the map, combine and reduce phases.
		// The reducer doubles as the combiner: writing each key once is idempotent, so combining early is safe.
		job.setMapperClass(Map.class);
		job.setCombinerClass(Reduce.class);
		job.setReducerClass(Reduce.class);
		
		// Output key and value types
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		
		// Wire up the input and output paths
		FileInputFormat.addInputPath(job,new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job,new Path(otherArgs[1]));
		/*
		 * To take the paths from the command line instead:
		 * FileInputFormat.addInputPath(job,new Path(args[0]));
		 * FileOutputFormat.setOutputPath(job,new Path(args[1]));
		 */
		
		job.waitForCompletion(true);
		
		// Print job information
		System.out.println("Job name: " + job.getJobName());
		System.out.println("Job succeeded: " + (job.isSuccessful() ? "Yes" : "No"));
	}
}
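A sketch of a typical run, assuming the class has been packaged into a jar named dedup.jar (the jar name is an assumption; the paths are the ones hardcoded above):

	hadoop jar dedup.jar Dedup
	hadoop fs -cat hdfs://localhost:9000/home/xd/hadoop_tmp/DedupOut/part-r-00000

The part-r-00000 file is where a single new-API reducer writes its output.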


Data Sorting:

For sorting, most of the work is done by the framework itself: the mapper emits each number as an IntWritable key, and the shuffle phase delivers keys to the reducer already in sorted order. All the reducer has to do is tag each value with a running line number as it writes the output.
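For example, with a hypothetical input of one integer per line:

Input (Sort_in):
	2
	32
	654
	32

Output (Sort_out, rank then value, tab-separated):
	1	2
	2	32
	3	32
	4	654

Duplicates are kept: the reducer loops over the values for each key, so every occurrence gets its own rank. The full job: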
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class DataSort {

	/**
	 * Mapper: parses each line as an integer and emits it as the key.
	 */
	public static class Map extends Mapper<Object,Text,IntWritable,IntWritable>{
		private static IntWritable data = new IntWritable();
		public void map(Object key,Text value,Context context) throws IOException, InterruptedException{
			// Each line holds one integer; emit it as the key so the shuffle
			// phase sorts the numbers for us. The value 1 is just a placeholder.
			String line = value.toString().trim();
			data.set(Integer.parseInt(line));
			context.write(data, new IntWritable(1));
		}
	}
	public static class Reduce extends Reducer<IntWritable,IntWritable,IntWritable,IntWritable>{
		// Running rank, used as the output line number.
		private static IntWritable linenum = new IntWritable(1);
		public void reduce(IntWritable key,Iterable<IntWritable> values,Context context) throws IOException, InterruptedException{
			// Keys arrive sorted; write one (rank, number) pair per occurrence,
			// so duplicate numbers each get their own line number.
			for(IntWritable val:values){
				context.write(linenum,key);
				linenum = new IntWritable(linenum.get()+1);
			}
		}
	}
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		// Initialize the configuration
		Configuration conf = new Configuration();
		
		/* As in the dedup example, the paths are hardcoded here rather than read
		 * from the command-line args; both approaches behave the same.
		 */
		// Input and output paths
		String[] ioArgs = new String[]{"hdfs://localhost:9000/home/xd/hadoop_tmp/Sort_in",
				"hdfs://localhost:9000/home/xd/hadoop_tmp/Sort_out"};
		
		String[] otherArgs = new GenericOptionsParser(conf,ioArgs).getRemainingArgs();
		
		if(otherArgs.length!=2){
			System.err.println("Usage: DataSort <in> <out>");
			System.exit(2);
		}
		// Set up the job
		Job job = new Job(conf,"Datasort Job");
		job.setJarByClass(DataSort.class);
		
		// Classes handling the map and reduce phases. No combiner is set here:
		// running the rank-assigning reducer map-side would replace the numbers with partial ranks.
		job.setMapperClass(Map.class);
		job.setReducerClass(Reduce.class);
		
		// Output key and value types
		job.setOutputKeyClass(IntWritable.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Wire up the input and output paths
		FileInputFormat.addInputPath(job,new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job,new Path(otherArgs[1]));
		/*
		 * To take the paths from the command line instead:
		 * FileInputFormat.addInputPath(job,new Path(args[0]));
		 * FileOutputFormat.setOutputPath(job,new Path(args[1]));
		 */
		job.waitForCompletion(true);
		
		// Print job information
		System.out.println("Job name: " + job.getJobName());
		System.out.println("Job succeeded: " + (job.isSuccessful() ? "Yes" : "No"));
	}
}
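One caveat: the global ordering relies on the job running with a single reduce task, which is the default when the reducer count is never set. With more reducers, each output file would only be sorted within itself. To make that assumption explicit, one extra line in main (not in the original code) pins it down:

	// Global sort requires all keys to pass through one reducer.
	job.setNumReduceTasks(1);

For inputs too large for a single reducer, Hadoop's TotalOrderPartitioner is the standard way to get a globally sorted, multi-file output.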



Original article: http://blog.csdn.net/xd_122/article/details/38684605
