Data format:
Line 1: ABCDE
Line 2: CDEF
Use MapReduce to count the words that appear (A B C D E F, six words in total).
This doesn't really need MapReduce: emitting straight from map would be enough; the reduce phase isn't needed, is it?
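For reference, a map-only variant does exist: one line added to the driver below skips the shuffle/reduce phase entirely (a sketch, not a recommendation for this problem):

job.setNumReduceTasks(0); // map output is then written straight to HDFS

But with no reduce phase, duplicate keys (C, D, E in the sample) are never summed, so getting one total per word still takes a reducer. A complete implementation: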
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static class WordCountMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] words = value.toString().split(",");
            for (String str : words) {
                // Strip spaces first so the index below cannot go out of bounds.
                String ss = str.replace(" ", "");
                for (int i = 0; i < ss.length(); i++) {
                    // Bug fix: index into ss (the cleaned string), not str,
                    // otherwise wrong characters are emitted whenever the
                    // line contained spaces.
                    word.set(ss.substring(i, i + 1));
                    context.write(word, one);
                }
            }
        }
    }
    public static class WordCountReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum the 1s the mapper emitted for this word.
            int total = 0;
            for (IntWritable val : values) {
                total += val.get();
            }
            context.write(key, new IntWritable(total));
        }
    }
    // INPUT_PATH: path of the input file on HDFS
    private static final String INPUT_PATH = "/mr.txt";
    // OUTPUT_PATH: output directory on HDFS (must not already exist)
    private static final String OUTPUT_PATH = "/mr/out/";

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Legacy property naming the job jar; setJarByClass below normally suffices.
        conf.set("mapred.jar", "wc.jar");
        Job job = Job.getInstance(conf, "word count"); // new Job(conf, ...) is deprecated
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, INPUT_PATH);
        FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));
        // The original never submitted the job: without this call nothing runs.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
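On the sample input this writes one line per word (key and value are tab-separated by TextOutputFormat):

A 1
B 1
C 2
D 2
E 2
F 1

If what you actually want is the single number 6 (how many distinct words occur), one untested sketch is to have a reducer bump a job counter once per key and read it back in the driver; the group/counter names "wc" and "distinctWords" below are made up:

    public static class DistinctWordReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // reduce() runs once per distinct key, so incrementing here counts
            // distinct words; nothing needs to be written to the job output.
            context.getCounter("wc", "distinctWords").increment(1);
        }
    }

and in the driver, after the job finishes:

        if (job.waitForCompletion(true)) {
            long distinct = job.getCounters()
                    .findCounter("wc", "distinctWords").getValue();
            System.out.println("distinct words = " + distinct); // 6 for the sample
        }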