ClassNotFoundException from a simple Hadoop program

Date: 2018-05-19 02:58:57

Tags: java hadoop mapreduce word-count

I recently rewrote the code of Hadoop's WordCount example, but when I run it on my virtual machine (Ubuntu Server 14.04 with Hadoop and Java set up), I get a ClassNotFoundException. I have already tried many of the solutions found on the internet, but none of them worked. What can I do to fix this?

My code:

        package org.apache.hadoop.examples;

        import java.io.IOException;
        import java.util.StringTokenizer;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.FloatWritable;
        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mapreduce.Job;
        import org.apache.hadoop.mapreduce.Mapper;
        import org.apache.hadoop.mapreduce.Reducer;
        import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
        import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
        import org.apache.hadoop.util.GenericOptionsParser;

        public class myhadoop {

            // Note: a static field is only shared while mapper and reducer run in
            // the same JVM (local mode); on a real cluster each task gets its own
            // JVM, so a Counter would be needed instead.
            public static int total_number = 0;

            public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

                private final static IntWritable one = new IntWritable(1);
                private Text word = new Text();

                @Override
                public void map(Object key, Text value, Context context)
                        throws IOException, InterruptedException {
                    // Tokenize each input line and emit (word, 1) per token.
                    StringTokenizer itr = new StringTokenizer(value.toString());
                    while (itr.hasMoreTokens()) {
                        word.set(itr.nextToken());
                        context.write(word, one);
                        total_number = total_number + 1;
                    }
                }
            }

            public static class IntSumCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {

                private IntWritable result = new IntWritable();

                @Override
                public void reduce(Text key, Iterable<IntWritable> values, Context context)
                        throws IOException, InterruptedException {
                    // Pre-aggregate counts on the map side to cut shuffle traffic.
                    int sum = 0;
                    for (IntWritable val : values) {
                        sum += val.get();
                    }
                    result.set(sum);
                    context.write(key, result);
                }
            }

            public static class ResultCountReducer extends Reducer<Text, IntWritable, Text, FloatWritable> {

                private FloatWritable result = new FloatWritable();

                @Override
                public void reduce(Text key, Iterable<IntWritable> values, Context context)
                        throws IOException, InterruptedException {
                    int sum = 0;
                    for (IntWritable val : values) {
                        sum += val.get();
                    }
                    // Cast before dividing: plain int division would truncate
                    // every frequency below 1 down to 0.
                    float frequency = (float) sum / total_number;
                    result.set(frequency);
                    context.write(key, result);
                }
            }

            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
                if (otherArgs.length != 2) {
                    System.err.println("Usage: myhadoop <in> <out>");
                    System.exit(2);
                }
                // new Job(conf, name) is deprecated; the factory method is preferred.
                Job job = Job.getInstance(conf, "myhadoop");
                job.setJarByClass(myhadoop.class);
                job.setMapperClass(TokenizerMapper.class);
                job.setCombinerClass(IntSumCombiner.class);
                job.setReducerClass(ResultCountReducer.class);
                // The map output value type (IntWritable) differs from the job's
                // final output value type (FloatWritable), so it must be declared
                // explicitly or the job fails with a type mismatch at runtime.
                job.setMapOutputValueClass(IntWritable.class);
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(FloatWritable.class);
                FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
                FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
                System.exit(job.waitForCompletion(true) ? 0 : 1);
            }
        }

1 answer:

Answer 0: (score: 0)

Solution from the comments: remove the first line of the file, i.e. the package declaration

        package org.apache.hadoop.examples;

and change the driver code, replacing the call to

        Job.setJarByClass()

with

        Job.setJar()
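
Both changes attack the same root cause: at submission time Hadoop cannot locate the job class inside the jar it was handed. setJarByClass() asks the framework to infer the jar from wherever the named class was loaded, which breaks when the class was compiled loose on the classpath or packaged under an unexpected path, while setJar() names the jar file explicitly. A minimal sketch of the adjusted driver, assuming the compiled classes are packed into a jar called myhadoop.jar in the working directory (that file name is an assumption; substitute whatever your build produces):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.mapreduce.Job;

        public class myhadoop {
            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                Job job = Job.getInstance(conf, "myhadoop");
                // Point Hadoop at the jar file directly instead of letting it
                // infer the jar from the enclosing class; "myhadoop.jar" is an
                // assumed name used here for illustration.
                job.setJar("myhadoop.jar");
                // ... mapper/combiner/reducer and input/output configuration
                // from the question go here, unchanged ...
                System.exit(job.waitForCompletion(true) ? 0 : 1);
            }
        }

Alternatively, keep the package line and launch the job under its fully qualified class name, e.g. hadoop jar myhadoop.jar org.apache.hadoop.examples.myhadoop <in> <out> — submitting with the bare name myhadoop when the class lives in a package is a common way to trigger exactly this ClassNotFoundException.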