尝试运行Yarn mapreduce进程时获取NoSuchMethodError异常

时间:2015-03-01 16:31:32

标签: java hadoop mapreduce

我得到以下异常:

线程 "AWT-EventQueue-0" 中出现异常：java.lang.NoSuchMethodError: org.apache.hadoop.mapred.JobConf.setBooleanIfUnset(Ljava/lang/String;Z)V

这是我的代码:

/**
 * Configures and runs the "VectorCreator" MapReduce job.
 *
 * @param CBresults   input path containing the clustering results to read
 * @param outpath     output directory for the job (deleted first if it exists)
 * @param nummappers  requested number of map tasks (only logged here; not applied to the job)
 * @param numreducers number of reduce tasks to use
 * @throws IOException            on filesystem or job-submission failure
 * @throws ClassNotFoundException if a job class cannot be resolved at submission
 * @throws InterruptedException   if the wait for job completion is interrupted
 */
public static void CreateVector(String CBresults,
                                String outpath,
                                int nummappers,
                                int numreducers) throws IOException, ClassNotFoundException, InterruptedException {
    System.out.println("NUM_FMAP_TASKS: "     + nummappers);
    System.out.println("NUM_FREDUCE_TASKS: "  + numreducers);
    Configuration conf = new Configuration();
    // NOTE(review): new Job(conf, name) is deprecated in Hadoop 2.x in favor of
    // Job.getInstance(conf, name); kept as-is so this compiles against older APIs too.
    Job job = new Job(conf, "VectorCreator");

    job.setJarByClass(VectorCreator.class);

    job.setNumReduceTasks(numreducers);

    FileInputFormat.addInputPath(job, new Path(CBresults));
    job.setMapperClass(VectorCreator.ClusterMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    Path oPath = new Path(outpath);
    FileOutputFormat.setOutputPath(job, oPath);
    job.setReducerClass(VectorCreator.ClusterReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    // Remove stale output so the job does not abort with "output directory already exists".
    // (oPath was needlessly re-created here in the original; reuse the one above.)
    System.err.println("  Removing old results");
    FileSystem fs = FileSystem.get(job.getConfiguration());
    fs.delete(oPath, true); // delete file, true for recursive

    int code = job.waitForCompletion(true) ? 0 : 1;
    // Log completion BEFORE exiting: in the original, System.exit() preceded this
    // line, making it unreachable dead code.
    System.err.println("Create Vector Finished");
    System.exit(code);
}

这是我的导入:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;

这是我的pom.xml文件:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>WWH-BIO</artifactId>
        <groupId>WWH</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>VectorCreator</artifactId>

    <dependencies>
        <!-- Fix for the reported NoSuchMethodError: the original pom mixed
             hadoop-core 0.20.2 with hadoop-common 2.2.0, putting two
             incompatible copies of org.apache.hadoop.mapred.JobConf on the
             classpath. The 0.20.2 JobConf lacks setBooleanIfUnset(String, boolean),
             hence the runtime failure. Depend on the single aggregate
             hadoop-client artifact at one consistent version instead. -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.2.0</version>
        </dependency>
    </dependencies>
</project>

我非常确定问题出在依赖关系上（pom.xml 中同时引入了两个不同版本的 Hadoop）。

0 个答案:

没有答案