Task description:
Make every record that appears more than once in the raw input appear only once in the output file. Because the MapReduce shuffle groups identical keys together, it is enough for the mapper to emit each input line as its key and for the reducer to write each key exactly once: on the sample below, the 21 input lines collapse into 7 key groups, so the reducer writes 7 lines.
example data:
2015-3-1 a
2015-3-2 b
2015-3-3 c
2015-3-4 d
2015-3-5 e
2015-3-6 f
2015-3-7 g
2015-3-1 a
2015-3-2 b
2015-3-3 c
2015-3-4 d
2015-3-5 e
2015-3-6 f
2015-3-7 g
2015-3-1 a
2015-3-2 b
2015-3-3 c
2015-3-4 d
2015-3-5 e
2015-3-6 f
2015-3-7 g
code:
package mrTest;

import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class shujuquchong {

    // The mapper emits each input line as the key; NullWritable carries no payload.
    public static class Map extends Mapper<Object, Text, Text, NullWritable> {
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(value, NullWritable.get());
        }
    }

    // The shuffle stage groups identical keys, so the reducer sees each distinct
    // line exactly once and writes it out once, discarding the duplicates.
    public static class Reduce extends Reducer<Text, NullWritable, Text, NullWritable> {
        @Override
        public void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, NullWritable.get());
        }
    }

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // new Job(conf, name) is deprecated; use the Job.getInstance factory instead.
        Job job = Job.getInstance(new Configuration(), "data deduplication");
        job.setJarByClass(shujuquchong.class);
        job.setNumReduceTasks(1);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Measure the job's wall-clock running time.
        Date start = new Date();                           // start time
        int result = job.waitForCompletion(true) ? 0 : 1;  // run the job
        Date end = new Date();                             // end time
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);

        System.out.println("Job start time: " + start);
        System.out.println("Job end time: " + end);
        System.out.println("Job elapsed time: " + time + " minutes");
        System.out.println("Job name: " + job.getJobName());
        System.out.println("Job succeeded: " + job.isSuccessful());
        // The string-based group "org.apache.hadoop.mapred.Task$Counter" belongs to the
        // old API; the TaskCounter enum is the supported way to read these counters.
        System.out.println("Map input records: "
                + job.getCounters().findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue());
        System.out.println("Map output records: "
                + job.getCounters().findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue());
        System.out.println("Reduce input records: "
                + job.getCounters().findCounter(TaskCounter.REDUCE_INPUT_RECORDS).getValue());
        System.out.println("Reduce output records: "
                + job.getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).getValue());

        System.exit(result);
    }
}
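To submit the job, package the class into a jar and run it with the hadoop CLI, passing the input and output paths as args[0] and args[1]. A minimal sketch, where the jar name dedup.jar and the HDFS paths are placeholder assumptions:

hadoop jar dedup.jar mrTest.shujuquchong /user/hadoop/dedup/input /user/hadoop/dedup/output

Note that the output directory must not already exist: FileOutputFormat fails the job rather than overwrite an existing path.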
Result:
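The job writes its result to part-r-00000 in the output directory. With a single reducer, each distinct line should appear exactly once, in the sorted key order produced by the shuffle:

2015-3-1 a
2015-3-2 b
2015-3-3 c
2015-3-4 d
2015-3-5 e
2015-3-6 f
2015-3-7 g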