The code below fails when run in pseudo-distributed mode. Does anyone know how to fix this?
package preview;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class SortNum extends Configured implements Tool {
        public static class MapClass extends Mapper<Object, Text, IntWritable, IntWritable>{
                // reused across records to avoid allocating a new object per call
                private final IntWritable data = new IntWritable();
                private final IntWritable one = new IntWritable(1);
                /**
                 * map parses each input line as an int and emits it as the output key,
                 * so the shuffle phase sorts the numbers for us
                 */
                @Override
                protected void map(Object key, Text value, Context context)
                                throws IOException, InterruptedException {
                        String line = value.toString().trim();
                        //data holds the parsed number; it goes out as the map key
                        data.set(Integer.parseInt(line));
                        context.write(data, one);
                }
        }
        
        public static class Reduce extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable>{
                // running rank: the output key is each value's position in sorted order
                private final IntWritable lineNum = new IntWritable(1);
                @Override
                protected void reduce(IntWritable key, Iterable<IntWritable> values,
                                Context context)
                                throws IOException, InterruptedException {
                        // iterate so that duplicate numbers each get their own rank
                        for (IntWritable value : values) {
                                context.write(lineNum, key);
                                lineNum.set(lineNum.get() + 1);
                        }
                }
        }
        
        public static class Partition extends Partitioner<IntWritable, IntWritable>{
                @Override
                public int getPartition(IntWritable key, IntWritable value, int numPartitions) {
                        // keys are assumed to lie in [0, maxNumber]; each reducer
                        // gets one contiguous range so the output is globally sorted
                        int maxNumber = 65223;
                        int bound = maxNumber / numPartitions + 1;
                        int keyNumber = key.get();
                        for (int i = 0; i < numPartitions; i++) {
                                if (keyNumber >= bound * i && keyNumber < bound * (i + 1))
                                        return i;
                        }
                        // never return a negative partition number: the framework
                        // rejects it and the job fails
                        return numPartitions - 1;
                }
        }
        
        public int run(String[] args) throws Exception {
                
                Configuration conf = getConf();
                Job job = new Job(conf);
                job.setJobName("SortNum");
                job.setJarByClass(SortNum.class);
                
                Path in = new Path(args[0]);
                Path out = new Path(args[1]);
                FileInputFormat.setInputPaths(job, in);
                FileOutputFormat.setOutputPath(job, out);
                
                job.setMapperClass(MapClass.class);
                job.setReducerClass(Reduce.class);
                job.setPartitionerClass(Partition.class);
                
                job.setInputFormatClass(TextInputFormat.class);
                job.setOutputFormatClass(TextOutputFormat.class);
                // both map and reduce emit IntWritable/IntWritable; declaring Text here
                // causes a "Type mismatch in key from map" error at runtime
                job.setOutputKeyClass(IntWritable.class);
                job.setOutputValueClass(IntWritable.class);
                
                // return the exit code instead of calling System.exit() inside run(),
                // so ToolRunner can propagate it back to main()
                return job.waitForCompletion(true) ? 0 : 1;
        }
        public static void main(String[] args) {
                int ret = 0;
                try {
                        ret = ToolRunner.run(new Configuration(), new SortNum(), args);
                } catch (Exception e) {
                        e.printStackTrace();
                }
                System.exit(ret);
        }
}

Error message:
 
Exception in thread "main" java.io.IOException: Error opening job jar: wordcount.jar
        at org.apache.hadoop.util.RunJar.main(RunJar.java:90)
Caused by: java.util.zip.ZipException: error in opening zip file
        at java.util.zip.ZipFile.open(Native Method)
        at java.util.zip.ZipFile.<init>(ZipFile.java:127)
        at java.util.jar.JarFile.<init>(JarFile.java:135)
        at java.util.jar.JarFile.<init>(JarFile.java:72)
        at org.apache.hadoop.util.RunJar.main(RunJar.java:88)

One more question: is there a good way to debug programs in a Hadoop environment, something that can step through code the way the Eclipse debugger does? Many thanks!
There is a Hadoop plugin for Eclipse; build one yourself to match your Hadoop version. Your program is not what is failing here: the error comes from the path to wordcount.jar when the job is submitted. I have run into this error before too. Try passing the absolute path to the jar and see if that helps.
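For example, instead of a bare relative name, submit with the full path (the jar location below is only a placeholder for wherever you actually exported it):

        hadoop jar /home/hadoop/wordcount.jar preview.SortNum input output

As for step-through debugging, the easiest route is Hadoop's local mode, which runs the whole job in a single JVM so the ordinary Eclipse debugger works. Here is a minimal sketch, assuming Hadoop 1.x configuration keys (on 2.x/YARN the equivalent setting is mapreduce.framework.name=local); the launcher class name and the input/output paths are placeholders:

package preview;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical launcher: forces local (single-JVM) execution so the job
// can be started and stepped through directly inside Eclipse.
public class SortNumDebug {
        public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                conf.set("mapred.job.tracker", "local"); // run map/reduce in-process
                conf.set("fs.default.name", "file:///"); // read/write the local filesystem
                int ret = ToolRunner.run(conf, new SortNum(),
                                new String[] { "input/numbers.txt", "output" });
                System.exit(ret);
        }
}

Set a breakpoint in map() or reduce(), run this class with "Debug As > Java Application", and Eclipse will stop there, since no separate task JVMs are launched in local mode.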