ClassNotFoundException when running a MapReduce program

Asked: 2016-06-27 06:16:26

Tags: java eclipse hadoop mapreduce classnotfoundexception

I am writing a MapReduce program for matrix addition. Since it needs two input files, I am using MultipleInputs. I have the following classes:

MatAddMapper1.java

package mapred;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class MatAddMapper1 extends Mapper<LongWritable, Text, Text, IntWritable> 
{
    //private static final int MISSING = 9999;
    @Override
    public void map(LongWritable key, Text value, Context context)
    throws IOException, InterruptedException
    {
        String line = value.toString();
        String[] content = line.split (" ");
        String key1 = content[0] + " " + content[1];
        int val = Integer.parseInt(content[2]);
        // Key is (i,j)
        context.write(new Text(key1), new IntWritable(val));
    }
}
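
Since the mapper splits on single spaces, each line of an input matrix file is presumably of the form i j value; a line such as 0 1 5 is emitted as the pair ("0 1", 5), so entries of both matrices that share an index meet in the same reduce call.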

MatAddMapper2.java is similar.

MatAddReducer.java

package mapred;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MatAddReducer
extends Reducer<Text, IntWritable, Text, IntWritable> 
{
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
    throws IOException, InterruptedException 
    {
        int val = 0;
        for (IntWritable value : values) 
        {
            val = val + value.get();
        }
        context.write(key, new IntWritable(val));
    }
}
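
For each (i,j) key, the reducer receives one value from each input matrix and writes their sum, which is exactly element-wise matrix addition.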

MatAddApp.java (main class)

package mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class MatAddApp extends Configured implements Tool
{


     public int run(String[] args) throws Exception 
     {
         Configuration conf = new Configuration();
         @SuppressWarnings("deprecation")
         Job job = new Job(conf, "Matrix Addition");
         job.setJarByClass(MatAddApp.class);
         MultipleInputs.addInputPath(job,new Path(args[0]),TextInputFormat.class,MatAddMapper1.class);
         MultipleInputs.addInputPath(job,new Path(args[1]),TextInputFormat.class,MatAddMapper2.class);

         FileOutputFormat.setOutputPath(job, new Path(args[2]));
         job.setReducerClass(MatAddReducer.class);
         job.setOutputKeyClass(Text.class);
         job.setOutputValueClass(IntWritable.class);

         return (job.waitForCompletion(true) ? 0 : 1);

     }

     public static void main(String[] args) throws Exception 
     {
         int ecode = ToolRunner.run(new MatAddApp(), args);
         System.exit(ecode);
     }

}

I am using Eclipse and have created a jar file, MatAddition.jar. M.txt and N.txt are the input matrices. When I try to run the program on my Hadoop cluster, I get a ClassNotFoundException.

2 Answers:

Answer 0 (score: 1)

The problem is the class name. When it is set in the configuration, the driver class name should be fully qualified, as shown below:

 job.setJarByClass(mapred.MatAddApp.class); 
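
The same fully qualified name is also what hadoop jar expects on the command line. A hypothetical invocation, assuming M.txt and N.txt have already been copied into HDFS and the output directory does not exist yet:

 hadoop jar MatAddition.jar mapred.MatAddApp M.txt N.txt output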

Answer 1 (score: 0)

INPUT.TXT

A,0|0,1.0
A,0|1,2.0
A,0|2,3.0
A,0|3,4.0
A,1|0,5.0
A,1|1,6.0
A,1|2,7.0
A,1|3,8.0
B,0|0,1.0
B,0|1,2.0
B,0|2,3.0
B,0|3,4.0
B,1|0,5.0
B,1|1,6.0
B,1|2,7.0
B,1|3,8.0

Here, the first column is the name of the matrix, the second column is the index (row|column), and the third column is the value.
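
As a minimal, hypothetical illustration (the class name ParseDemo is not part of the answer's code), this is how one record splits, mirroring what MatMapper does below:

public class ParseDemo {
    public static void main(String[] args) {
        // Split one INPUT.TXT record on commas, exactly as MatMapper does
        String[] parts = "A,0|0,1.0".split(",");
        System.out.println(parts[0]); // "A"   -> matrix name (ignored when adding)
        System.out.println(parts[1]); // "0|0" -> row|column index, the map output key
        System.out.println(parts[2]); // "1.0" -> cell value, summed in the reducer
    }
}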

MatrixAdd.java

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MatrixAdd {
    public static class MatMapper extends Mapper<Object, Text, Text, DoubleWritable>{
        private Text index = new Text();
        private final DoubleWritable num = new DoubleWritable();
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException{      
            String record = value.toString();
            String[] parts = record.split(",");
            index.set(parts[1]);
            num.set(Double.parseDouble(parts[2]));
            context.write(index, num);
        }
    }
    public static class MatReducer extends Reducer<Text,DoubleWritable,Text,DoubleWritable> {
        private DoubleWritable result = new DoubleWritable();
        public void reduce(Text key, Iterable<DoubleWritable> values, Context context) throws IOException, InterruptedException {
            double sumValue = 0;
            for(DoubleWritable val: values) {
                sumValue += val.get();
            }
            result.set(sumValue);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "matrix addition");
        job.setJarByClass(MatrixAdd.class);
        job.setMapperClass(MatMapper.class);
        job.setCombinerClass(MatReducer.class); // safe as a combiner: addition is associative and commutative
        job.setReducerClass(MatReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }    
}

Output:

0|0 2.0
0|1 4.0
0|2 6.0
0|3 8.0
1|0 10.0
1|1 12.0
1|2 14.0
1|3 16.0
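
Each output line is the element-wise sum of A and B at that index: for 0|0, 1.0 + 1.0 = 2.0, and for 1|3, 8.0 + 8.0 = 16.0.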