import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Classic Hadoop word-count job written against the old {@code org.apache.hadoop.mapred}
 * (pre-0.20 "mapred") API: a tokenizing mapper, a summing reducer, and a {@link Tool}
 * driver.
 *
 * NOTE(review): this listing appears to have been garbled by HTML extraction — the
 * generic type parameters on {@code Mapper}, {@code Reducer}, {@code OutputCollector}
 * and {@code Iterator} (angle-bracketed text) look stripped; presumably the original
 * read {@code implements Mapper<LongWritable, Text, Text, IntWritable>} etc. — TODO
 * confirm against the original article. As reproduced here, {@code values.next().get()}
 * in the reducer will not compile on a raw {@code Iterator}. The listing is also
 * truncated: {@code printUsage()} is cut off mid string literal and the {@code run}/
 * {@code main} methods are missing.
 */
public class WordCount extends Configured implements Tool {

    /**
     * Mapper: splits each input line on whitespace and emits {@code (token, 1)}
     * for every token.
     */
    public static class MapClass extends MapReduceBase implements Mapper {

        // Constant count of 1 emitted with every token; shared across calls to
        // avoid allocating a new IntWritable per token.
        private final static IntWritable one = new IntWritable(1);
        // Reusable output key; set() is called per token so only one Text object
        // is allocated for the mapper's lifetime.
        private Text word = new Text();

        /**
         * Tokenizes one input line and emits each token with a count of one.
         *
         * @param key      byte offset of the line in the input split (unused)
         * @param value    the line of text to tokenize
         * @param output   collector receiving {@code (word, 1)} pairs
         * @param reporter progress reporter (unused)
         * @throws IOException if the collector fails
         */
        public void map(LongWritable key, Text value, OutputCollector output, Reporter reporter) throws IOException {
            String line = value.toString();
            // Default StringTokenizer delimiters: space, tab, newline, CR, FF.
            StringTokenizer itr = new StringTokenizer(line);
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                output.collect(word, one);
            }
        }
    }

    /**
     * A reducer class that just emits the sum of the input values.
     */
    public static class Reduce extends MapReduceBase implements Reducer {

        /**
         * Sums all counts for one word and emits {@code (word, total)}.
         *
         * @param key      the word
         * @param values   the per-occurrence counts to add up
         * @param output   collector receiving the {@code (word, sum)} pair
         * @param reporter progress reporter (unused)
         * @throws IOException if the collector fails
         */
        public void reduce(Text key, Iterator values, OutputCollector output, Reporter reporter) throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                // NOTE(review): requires Iterator<IntWritable>; with the raw
                // Iterator shown here, next() yields Object and get() cannot
                // resolve — further evidence the generics were stripped.
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    // Prints command-line usage. The string literal below is truncated by the
    // extraction (unterminated); the original presumably documented
    // "[-m <maps>] [-r <reduces>] <input> <output>".
    static int printUsage() {
        System.out.println("wordcount [-m ] [-r ]
[admin@host WordCount]$ hadoop jar WordCount.jar WordCount /tmp/input /tmp/output 10/09/16 22:49:43 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same. 10/09/16 22:49:43 INFO mapred.FileInputFormat: Total input paths to process : 2 10/09/16 22:49:43 INFO mapred.JobClient: Running job: job_201008171228_76165 10/09/16 22:49:44 INFO mapred.JobClient: map 0% reduce 0% 10/09/16 22:49:47 INFO mapred.JobClient: map 100% reduce 0% 10/09/16 22:49:54 INFO mapred.JobClient: map 100% reduce 100% 10/09/16 22:49:55 INFO mapred.JobClient: Job complete: job_201008171228_76165 10/09/16 22:49:55 INFO mapred.JobClient: Counters: 16 10/09/16 22:49:55 INFO mapred.JobClient: File Systems 10/09/16 22:49:55 INFO mapred.JobClient: HDFS bytes read=62 10/09/16 22:49:55 INFO mapred.JobClient: HDFS bytes written=73 10/09/16 22:49:55 INFO mapred.JobClient: Local bytes read=152 10/09/16 22:49:55 INFO mapred.JobClient: Local bytes written=366 10/09/16 22:49:55 INFO mapred.JobClient: Job Counters 10/09/16 22:49:55 INFO mapred.JobClient: Launched reduce tasks=1 10/09/16 22:49:55 INFO mapred.JobClient: Rack-local map tasks=2 10/09/16 22:49:55 INFO mapred.JobClient: Launched map tasks=2 10/09/16 22:49:55 INFO mapred.JobClient: Map-Reduce Framework 10/09/16 22:49:55 INFO mapred.JobClient: Reduce input groups=11 10/09/16 22:49:55 INFO mapred.JobClient: Combine output records=14 10/09/16 22:49:55 INFO mapred.JobClient: Map input records=4 10/09/16 22:49:55 INFO mapred.JobClient: Reduce output records=11 10/09/16 22:49:55 INFO mapred.JobClient: Map output bytes=118 10/09/16 22:49:55 INFO mapred.JobClient: Map input bytes=62 10/09/16 22:49:55 INFO mapred.JobClient: Combine input records=14 10/09/16 22:49:55 INFO mapred.JobClient: Map output records=14 10/09/16 22:49:55 INFO mapred.JobClient: Reduce input records=14
6. 查看运行结果
[admin@host WordCount]$ hadoop fs -ls /tmp/output/ Found 2 items drwxr-x---- admin admin 0 2010-09-16 22:43 /tmp/output/_logs -rw-r----- 1 admin admin 102 2010-09-16 22:44 /tmp/output/part-00000 [admin@host WordCount]$ hadoop fs -cat /tmp/output/part-00000 Hello, 1 You 1 are 2 china 1 hello, 1 i 2 love 2 ok 1 ok? 1 word 1 you 1