Setup method not being called in Hadoop Mapper

Time: 2014-03-17 19:05:28

Tags: java join hadoop mapreduce mapper

I run a series of Hadoop Mappers/Reducers and end up with a list of movie IDs. I use the MovieData file to display the movie names for those IDs, with the Mapper class shown below. The setup method does not appear to be called, since I never see its print statement, and when I try to use the HashMap that the load method is supposed to populate I get a NullPointerException. The code follows. Any pointers are appreciated.

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Mapper.Context;

public class MovieNamesMapper extends MapReduceBase implements Mapper<Object, Text, Text, Text> {

    private static HashMap<String, String> movieNameHashMap = new HashMap<String, String>();
    private BufferedReader bufferedReader;
    private String movieId = "";

    protected void setup(Context context) throws IOException,
            InterruptedException {

        System.out.println("Setting up system..");

        Path[] cacheFilesLocal = DistributedCache.getLocalCacheFiles(context
                .getConfiguration());

        for (Path eachPath : cacheFilesLocal) {
            if (eachPath.getName().toString().trim().equals("u.item")) {
                loadMovieNamesHashMap(eachPath, context);
            }
        }

    }

    private void loadMovieNamesHashMap(Path filePath, Context context)
            throws IOException {

        System.out.println("Loading movie names..");

        String strLineRead = "";

        try {
            bufferedReader = new BufferedReader(new FileReader(
                    filePath.toString()));

            while ((strLineRead = bufferedReader.readLine()) != null) {
                String movieIdArray[] = strLineRead.toString().split("\t|::");
                movieNameHashMap.put(movieIdArray[0].trim(),
                        movieIdArray[1].trim());
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (bufferedReader != null) {
                bufferedReader.close();

            }

        }

    }

    public void map(Object key, Text value, OutputCollector<Text, Text> output,
            Reporter reporter) throws IOException {
        System.out.println(key.toString() + " - " + value.toString());
        if (value.toString().length() > 0) {
            String moviePairArray[] = value.toString().split(":");

            for (String moviePair : moviePairArray) {
                String movieArray[] = moviePair.split(",");
                output.collect(new Text(movieNameHashMap.get(movieArray[0])),
                        new Text(movieNameHashMap.get(movieArray[1])));
            }
        }

    }

    public String getMovieId() {
        return movieId;
    }

    public void setMovieId(String movieId) {
        this.movieId = movieId;
    }

}

Here is my run method.

public int run(String[] args) throws Exception {

    // For finding user and his rated movie list.
    JobConf conf1 = new JobConf(MovieTopDriver.class);
    conf1.setMapperClass(MoviePairsMapper.class);
    conf1.setReducerClass(MoviePairsReducer.class);

    conf1.setJarByClass(MovieTopDriver.class);

    FileInputFormat.addInputPath(conf1, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf1, new Path("temp"));

    conf1.setMapOutputKeyClass(Text.class);
    conf1.setMapOutputValueClass(Text.class);

    conf1.setOutputKeyClass(Text.class);
    conf1.setOutputValueClass(IntWritable.class);

    // For finding movie pairs.
    JobConf conf2 = new JobConf(MovieTopDriver.class);
    conf2.setMapperClass(MoviePairsCoOccurMapper.class);
    conf2.setReducerClass(MoviePairsCoOccurReducer.class);

    conf2.setJarByClass(MovieTopDriver.class);

    FileInputFormat.addInputPath(conf2, new Path("temp"));
    FileOutputFormat.setOutputPath(conf2, new Path("freq_temp"));

    conf2.setInputFormat(KeyValueTextInputFormat.class);

    conf2.setMapOutputKeyClass(Text.class);
    conf2.setMapOutputValueClass(IntWritable.class);

    conf2.setOutputKeyClass(Text.class);
    conf2.setOutputValueClass(IntWritable.class);

    // Find top frequent movies along with their names.
    // Output Freq, moviePair
    // Keep a count and output only 20.

    JobConf conf3 = new JobConf(MovieTopDriver.class);
    conf3.setMapperClass(ValueKeyMapper.class);
    conf3.setReducerClass(ValueKeyReducer.class);

    conf3.setJarByClass(MovieTopDriver.class);

    FileInputFormat.addInputPath(conf3, new Path("freq_temp"));
    FileOutputFormat.setOutputPath(conf3, new Path("freq_temp2"));

    conf3.setInputFormat(KeyValueTextInputFormat.class);
    conf3.setMapOutputKeyClass(IntWritable.class);
    conf3.setMapOutputValueClass(Text.class);

    conf3.setOutputKeyClass(IntWritable.class);
    conf3.setOutputValueClass(Text.class);

    // Use only one reducer as we want to sort.
    conf3.setNumReduceTasks(1);

    // To sort in decreasing order.
    conf3.setOutputKeyComparatorClass(LongWritable.DecreasingComparator.class);

    // Find top movie name
    // Use a mapper side join to output names.

    JobConf conf4 = new JobConf(MovieTopDriver.class);
    conf4.setMapperClass(MovieNamesMapper.class);
    conf4.setJarByClass(MovieTopDriver.class);

    FileInputFormat.addInputPath(conf4, new Path("freq_temp2"));
    FileOutputFormat.setOutputPath(conf4, new Path(args[1]));

    conf4.setInputFormat(KeyValueTextInputFormat.class);
    conf4.setMapOutputKeyClass(Text.class);
    conf4.setMapOutputValueClass(Text.class);

    // Run the jobs

    Job job1 = new Job(conf1);
    Job job2 = new Job(conf2);
    Job job3 = new Job(conf3);
    Job job4 = new Job(conf4);

    JobControl jobControl = new JobControl("jobControl");
    jobControl.addJob(job1);
    jobControl.addJob(job2);
    jobControl.addJob(job3);
    jobControl.addJob(job4);
    job2.addDependingJob(job1);
    job3.addDependingJob(job2);
    job4.addDependingJob(job3);
    handleRun(jobControl);

    FileSystem.get(conf2).deleteOnExit(new Path("temp"));
    FileSystem.get(conf3).deleteOnExit(new Path("freq_temp"));
    FileSystem.get(conf4).deleteOnExit(new Path("freq_temp2"));

    System.out.println("Program complete.");
    return 0;
}

Update: I am using Hadoop 1.2.1 because that is what the cluster at school runs, so it is all I can use.

Update: I switched to configure instead of setup, but it is still not being called.

public void configure(JobConf jobConf) {

    System.out.println("Setting up system..");

    Path[] cacheFilesLocal;
    try {
        cacheFilesLocal = DistributedCache.getLocalCacheFiles(jobConf);

        for (Path eachPath : cacheFilesLocal) {
            if (eachPath.getName().toString().trim().equals("u.item")) {

                loadMovieNamesHashMap(eachPath);

            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

}

I also added the following to the run method.

DistributedCache.addFileToClassPath(new Path("moviedata"), conf4);
conf4.set("mapred.job.tracker", "local");

4 Answers:

Answer 0 (score: 0)

If your IDE supports it, have it override the methods from the superclass (in Eclipse that is Source -> Override/Implement Methods) to see whether the IDE thinks your type (Context) is wrong. If you did get it wrong, Eclipse will still let you override the method by inserting a stub with the correct signature.

To be precise, you need to decide whether you are using the mapred (old) or mapreduce (new) package. You appear to be using the mapred package (note that Context is imported from the wrong package). If you want to stay with the mapred package, use the configure() method; otherwise use setup() with the mapreduce package.
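
For illustration, here is a minimal sketch of what the mapper could look like if kept entirely on the old mapred API, with configure(JobConf) as the per-task initialization hook instead of setup(Context). The class name, the simplified map() body and the loadMovieNames helper are illustrative rather than the asker's exact code:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Sketch: old (mapred) API only, so there is no mapreduce.Mapper.Context import at all.
public class MovieNamesMapperOldApi extends MapReduceBase implements Mapper<Object, Text, Text, Text> {

    private final HashMap<String, String> movieNameHashMap = new HashMap<String, String>();

    @Override
    public void configure(JobConf jobConf) {
        // Called once per task by the old API before any map() call.
        System.out.println("Setting up system..");
        try {
            Path[] cacheFilesLocal = DistributedCache.getLocalCacheFiles(jobConf);
            if (cacheFilesLocal == null) {
                return;
            }
            for (Path eachPath : cacheFilesLocal) {
                if (eachPath.getName().trim().equals("u.item")) {
                    loadMovieNames(eachPath);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    private void loadMovieNames(Path filePath) throws IOException {
        // Build the id -> name lookup table from the localized cache file.
        BufferedReader reader = new BufferedReader(new FileReader(filePath.toString()));
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] fields = line.split("\t|::");
                movieNameHashMap.put(fields[0].trim(), fields[1].trim());
            }
        } finally {
            reader.close();
        }
    }

    public void map(Object key, Text value, OutputCollector<Text, Text> output,
            Reporter reporter) throws IOException {
        // Minimal illustrative lookup: emit the id and its name when known.
        String name = movieNameHashMap.get(value.toString().trim());
        if (name != null) {
            output.collect(new Text(value.toString().trim()), new Text(name));
        }
    }
}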

Answer 1 (score: 0)

You have to use the configure method:

public void configure(JobConf job) {

   }

setup is not defined in the Documentation.

Answer 2 (score: 0)

--- Alternative solution ---

I still haven't been able to figure it out. The model where a setup method is called once at the start of the mapper, before any map calls, seems to exist only in the new API (mapred vs. mapreduce).

I wanted to use the same map method in multiple mappers, with only a single variable differing. A field cannot be overridden, so instead I call public void setup() at the start of the map method and override it in the child mappers. It is of course invoked on every map call (i.e. for every line in these mappers' input files), but that is the least of my inefficiencies right now (a guarded variant addressing this follows the code below).

public static class Mapper1
    extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text>
{
    protected int someVar;

    public void setup()
    {
        System.out.println("[LOG] setup called");
        someVar = 1;
    }

    public void map(
        LongWritable key,
        Text value,
        OutputCollector<Text, Text> output,
        Reporter reporter
    ) throws IOException
    {
        setup();
        System.out.println("someVar: " + String.valueOf(someVar));
        //...
        output.collect(someKey, someValue);
    }
}

public static class Mapper3
    extends Mapper1
{
    //protected int someVar;
    //private int someVar;

    /*
    @Override
    public void setup(Context context)
        throws IOException, InterruptedException
    {
        System.out.println("[LOG] setup called");
        someVar = 2;
    }
    @Override
    public void configure(JobConf jobConf)
    {
        System.out.println("[LOG] configure called");
        someVar = 2;
    }
    */
    @Override
    public void setup()
    {
        System.out.println("[LOG] setup called");
        someVar = 2;
    }
}
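
To cut down the repeated work this workaround incurs, one option would be a boolean guard so the setup body only runs on the first map call. A minimal sketch of that idea (same mapred imports as Mapper1 above; the output line is purely illustrative):

public static class Mapper1Guarded
    extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text>
{
    protected int someVar;
    private boolean initialized = false;

    public void setup()
    {
        // Only do the real initialization work once per mapper instance.
        if (initialized) {
            return;
        }
        System.out.println("[LOG] setup called");
        someVar = 1;
        initialized = true;
    }

    public void map(
        LongWritable key,
        Text value,
        OutputCollector<Text, Text> output,
        Reporter reporter
    ) throws IOException
    {
        setup(); // still called per record, but effectively a no-op after the first call
        output.collect(new Text(String.valueOf(someVar)), value);
    }
}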

Answer 3 (score: 0)

I have code running on Hadoop 1.2.1 (also tested on 2.2.0) that makes extensive use of setup. This is what it looks like in my code:

    @Override
    public void setup(Context context) throws IllegalArgumentException, IOException {
        logger.debug("setup has been called");
    }

The differences I see are using "public" instead of "protected", and also using @Override, which helps you find out when the method is not actually overriding anything. Also note that I am using the new API (org.apache.hadoop.mapreduce).
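
Put together, a minimal sketch of a new-API mapper along those lines could look like the following; the class name and the pass-through map() body are illustrative, and the point is only that extending org.apache.hadoop.mapreduce.Mapper is what makes the framework call setup(Context) once per task:

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch: new (mapreduce) API, so setup(Context) is invoked once before any map() call.
public class MovieNamesMapperNewApi extends Mapper<Object, Text, Text, Text> {

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        System.out.println("setup has been called");
        // One-time initialization (e.g. loading a lookup table) would go here.
    }

    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // Pass-through map, purely for illustration.
        context.write(new Text(key.toString()), value);
    }
}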
