
Error when running a WordCount program in Eclipse

The WordCount program is as follows:

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCount {

    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path("hdfs://192.168.1.181:9000/user/root/bbs_post2"));
        FileOutputFormat.setOutputPath(conf, new Path("hdfs://192.168.1.181:9000/data/test-out10"));
//        FileInputFormat.setInputPaths(conf, new Path(args[0]));
//        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        JobClient.runJob(conf);
    }
}
The error output is as follows:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Exception in thread "main" java.lang.NullPointerException
    at java.lang.ProcessBuilder.start(Unknown Source)
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:445)
    at org.apache.hadoop.util.Shell.run(Shell.java:418)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:650)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:739)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:722)
    at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:631)
    at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:421)
    at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:277)
    at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:125)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:348)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1285)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1282)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1282)
    at org.apache.hadoop.mapred.JobClient$1.run(JobClient.java:562)
    at org.apache.hadoop.mapred.JobClient$1.run(JobClient.java:557)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
    at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:557)
    at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:548)
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:833)
    at WordCount.main(WordCount.java:71)

Could someone help me figure out what is going wrong here? Thanks!

爱吃鱼的程序员 2020-06-22 10:45:51
1 Answer


1. Are the *-site.xml files from Hadoop's conf directory on the classpath?

2. Does the host running the program have the Hadoop cluster's hosts entries configured?

3. Did you package the program, place the jar under Hadoop's lib directory, and restart the cluster? If you don't want to package it, you can call conf.setJarByClass(Map.class), and the jar containing that class will be distributed to the cluster (see the sketch after this list).
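
A minimal sketch of points 1 and 3 against the old mapred API used in the question. The conf-file paths below are hypothetical placeholders, and note that the JobConf(Class) constructor the OP already uses records the job jar as well:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class SubmitConfSketch {
    public static void main(String[] args) {
        // The JobConf(Class) constructor already records the jar that
        // contains WordCount as the job jar, so this line alone covers point 3.
        JobConf conf = new JobConf(WordCount.class);

        // Point 1: make the cluster's *-site.xml visible to the client.
        // These paths are hypothetical placeholders for this sketch.
        conf.addResource(new Path("/path/to/hadoop/conf/core-site.xml"));
        conf.addResource(new Path("/path/to/hadoop/conf/hdfs-site.xml"));

        // Equivalent explicit form of point 3: ship the jar containing the
        // given class to the cluster instead of copying it into Hadoop's lib.
        conf.setJarByClass(WordCount.class);
    }
}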


import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        Job job = new Job(conf, "wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }

}
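
As for the stack trace itself: a NullPointerException thrown from java.lang.ProcessBuilder.start inside org.apache.hadoop.util.Shell while submitting from Eclipse is the classic symptom of a Windows client on which Hadoop cannot locate winutils.exe. A minimal workaround sketch, assuming (hypothetically) that the Hadoop binaries, including bin\winutils.exe, are unpacked under C:\hadoop:

public class WinutilsWorkaroundSketch {
    public static void main(String[] args) throws Exception {
        // Shell reads the "hadoop.home.dir" system property (falling back
        // to the HADOOP_HOME environment variable) to find bin\winutils.exe;
        // C:\hadoop is a hypothetical install location for this sketch.
        System.setProperty("hadoop.home.dir", "C:\\hadoop");

        // Run the original driver once the property is in place.
        WordCount.main(args);
    }
}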



Follow-up (quoting “张东昊”’s checklist above and “Nob”’s comment): how did the OP end up solving this?
    2020-06-22 10:46:07