Launch a cluster with Elastic MapReduce in AWS.
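If you prefer the command line to the web console, the cluster can also be started with the AWS CLI. This is only a minimal sketch, assuming the AWS CLI is installed and configured; the cluster name, release label, key pair, and instance settings below are placeholders to adjust:

aws emr create-cluster \
    --name "WordCountCluster" \
    --release-label emr-5.36.0 \
    --applications Name=Hadoop \
    --use-default-roles \
    --ec2-attributes KeyName=mykey \
    --instance-type m5.xlarge \
    --instance-count 3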
Then log in to the master node over SSH.
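EMR master nodes accept SSH logins as the hadoop user. A minimal sketch, assuming your key pair file is mykey.pem and <master-public-dns> is the master public DNS name shown in the EMR console:

ssh -i mykey.pem hadoop@<master-public-dns>

Once logged in, compile the following program: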
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split each input line into tokens and emit (word, 1) for each one
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum the counts collected for each word
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "Word Count hadoop-0.20");

        // Set the driver, mapper, and reducer classes
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // Set the output key/value type classes
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Take the HDFS input and output directories from the command line
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
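To make the dataflow concrete: given an input line "to be or not to be", the mapper emits (to,1), (be,1), (or,1), (not,1), (to,1), (be,1); the shuffle phase groups these pairs by key, and the reducer sums each group, producing be 2, not 1, or 1, to 2.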
Set the classpath so that javac can find the Hadoop jars:
export CLASSPATH=$CLASSPATH:/home/hadoop/*:/home/hadoop/lib/*:'.'
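If your Hadoop distribution provides the hadoop classpath command (availability depends on the version), an equivalent that avoids hard-coding the paths is:

export CLASSPATH=$(hadoop classpath):.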
javac WordCount.java
jar cvf WordCount.jar *.class
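The jar cvf command packages every compiled class in the current directory, including the inner Mapper and Reducer classes. You can verify the contents with:

jar tf WordCount.jar

which should list WordCount.class, WordCount$WordCountMapper.class, and WordCount$WordCountReducer.class.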
hadoop jar WordCount.jar WordCount s3://15-319-s13/book-dataset/pg_00 /output
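Note that Hadoop refuses to start a job whose output directory already exists, so if you rerun the job, delete /output first (older releases spell the command hadoop fs -rmr /output):

hadoop fs -rm -r /output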
After the job completes successfully, the output directory lives in the Hadoop file system, so you can view it like this:
hadoop fs -cat /output/part-r-00000 | less
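Each reducer writes its own part-r-NNNNN file; hadoop fs -ls /output lists them all, and if you want a single local file you can merge them:

hadoop fs -getmerge /output wordcount.txt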
Main references:
http://kickstarthadoop.blogspot.com/2011/04/word-count-hadoop-map-reduce-example.html
http://kickstarthadoop.blogspot.com/2011/05/word-count-example-with-hadoop-020.html
Original post: http://blog.csdn.net/fightforyourdream/article/details/24827603