Hadoop reduce-side join using DataJoin

Posted: 2012-04-18 01:17:51

Tags: hadoop

I am performing a reduce-side join using the following code:

/*
 * HadoopMapper.java
 *
 * Created on Apr 8, 2012, 5:39:51 PM
 */


import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
// import org.apache.commons.logging.Log;
// import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.contrib.utils.join.*; 

/**
 *
 * @author 
 */
public class DataJoin extends Configured implements Tool 
    {
        public static class MapClass extends DataJoinMapperBase 
            {
                protected Text generateInputTag(String inputFile) 
                    {
                        String datasource = inputFile.split("-")[0];
                        return new Text(datasource);
                    }
            protected Text generateGroupKey(TaggedMapOutput aRecord) 
                {
                    String line = ((Text) aRecord.getData()).toString();
                    String[] tokens = line.split(",");
                    String groupKey = tokens[0];
                    return new Text(groupKey);
                }
            protected TaggedMapOutput generateTaggedMapOutput(Object value) 
                {
                    TaggedWritable retv = new TaggedWritable((Text) value);
                    retv.setTag(this.inputTag);
                    return retv;
                }
            }
        public static class Reduce extends DataJoinReducerBase 
            {
                protected TaggedMapOutput combine(Object[] tags, Object[] values) 
                    {
                        if (tags.length < 2) return null;
                        String joinedStr = "";
                        for (int i=0; i<values.length; i++) 
                        {
                            if (i > 0) joinedStr += ",";
                            TaggedWritable tw = (TaggedWritable) values[i];
                            String line = ((Text) tw.getData()).toString();
                            String[] tokens = line.split(",", 2);
                            joinedStr += tokens[1];
                        }
                        TaggedWritable retv = new TaggedWritable(new Text(joinedStr));
                        retv.setTag((Text) tags[0]);
                        return retv;
                    }
            }
        public static class TaggedWritable extends TaggedMapOutput 
            {
                private Writable data;
                public TaggedWritable(Writable data) 
                    {
                        this.tag = new Text("");
                        this.data = data;
                    }

                public Writable getData() 
                    {
                        return data;
                    }
                public void write(DataOutput out) throws IOException
                    {
                        this.tag.write(out);
                        this.data.write(out);
                    }
                public void readFields(DataInput in) throws IOException 
                    {
                        this.tag.readFields(in);
                        this.data.readFields(in);
                    }
            }
        public int run(String[] args) throws Exception 
            {
                Configuration conf = getConf();
                JobConf job = new JobConf(conf, DataJoin.class);
                String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
                if (otherArgs.length != 2) 
                    {
                        System.err.println("Usage: wordcount <in> <out>");
                        System.exit(2);
                    }

                Path in = new Path(args[0]);
                Path out = new Path(args[1]);
                FileInputFormat.setInputPaths(job, in);
                FileOutputFormat.setOutputPath(job, out);
                job.setJobName("DataJoin");
                job.setMapperClass(MapClass.class);
                job.setReducerClass(Reduce.class);
                job.setInputFormat(TextInputFormat.class);
                job.setOutputFormat(TextOutputFormat.class);
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(TaggedWritable.class);
                job.set("mapred.textoutputformat.separator", ",");
                JobClient.runJob(job);
                return 0;
            }
        public static void main(String[] args) throws Exception 
            {
                int res = ToolRunner.run(new Configuration(),
                new DataJoin(),
                args);
                System.exit(res);
            }
    }

I am able to compile my code. When I run it in hadoop, I get the following error in the reducer:

12/04/17 19:59:29 INFO mapred.JobClient:  map 100% reduce 27%
12/04/17 19:59:38 INFO mapred.JobClient:  map 100% reduce 30%
12/04/17 19:59:47 INFO mapred.JobClient:  map 100% reduce 33%
12/04/17 20:00:23 INFO mapred.JobClient: Task Id : attempt_201204061316_0018_r_000000_2, Status : FAILED
java.lang.RuntimeException: java.lang.NoSuchMethodException: DataJoin$TaggedWritable.<init>()
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:115)
        at org.apache.hadoop.io.serializer.WritableSerialization$WritableDeserializer.deserialize(WritableSerialization.java:62)
        at org.apache.hadoop.io.serializer.WritableSerialization$WritableDeserializer.deserialize(WritableSerialization.java:40)
        at org.apache.hadoop.mapred.Task$ValuesIterator.readNextValue(Task.java:1136)
        at org.apache.hadoop.mapred.Task$ValuesIterator.next(Task.java:1076)
        at org.apache.hadoop.mapred.ReduceTask$ReduceValuesIterator.moveToNext(ReduceTask.java:246)
        at org.apache.hadoop.mapred.ReduceTask$ReduceValuesIterator.next(ReduceTask.java:242)
        at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.regroup(DataJoinReducerBase.java:106)

The command I use to run hadoop is:

    /hadoop/core/bin/hadoop jar /export/scratch/lopez/Join/DataJoin.jar DataJoin /export/scratch/user/lopez/Join /export/scratch/user/lopez/Join_Output

and the DataJoin.jar file does contain DataJoin$TaggedWritable.

I checked some forums and found that this error can occur because of non-static classes. But my program has no non-static classes!
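As far as I understand it, Hadoop creates the value objects reflectively before calling readFields, roughly like the following sketch (hypothetical code, only to illustrate why a no-args constructor matters):

import java.lang.reflect.Constructor;
import org.apache.hadoop.io.Writable;

// Hypothetical sketch of what WritableSerialization does when it
// deserializes a value: it instantiates the class reflectively and then
// calls readFields on it. Without a no-args constructor,
// getDeclaredConstructor() throws NoSuchMethodException, which Hadoop
// rethrows as the RuntimeException seen above.
public class ReflectionDemo {
    public static void main(String[] args) throws Exception {
        Class<?> clz = Class.forName("DataJoin$TaggedWritable");
        Constructor<?> ctor = clz.getDeclaredConstructor(); // fails here
        Writable value = (Writable) ctor.newInstance();
        System.out.println("created " + value);
    }
}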

Can someone please help me?


Thank you Chris, I made the edits you described and updated my code to take the two input files, but the job still fails.

I can see the message INFO mapred.FileInputFormat: Total input paths to process : 2, so both inputs are being picked up.

The error is now:

     Status : FAILED
    java.lang.ArrayIndexOutOfBoundsException: 1
    at DataJoin$Reduce.combine(DataJoin.java:69)
    at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.joinAndCollect(DataJoinReducerBase.java:205)
    at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.joinAndCollect(DataJoinReducerBase.java:214)
    at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.joinAndCollect(DataJoinReducerBase.java:214)
    at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.joinAndCollect(DataJoinReducerBase.java:181)
    at org.apache.hadoop.contrib.utils.join.DataJoinReducerBase.reduce(DataJoinReducerBase.java:135)
    at org.apache.hadoop.mapred.ReduceTask.runOldReducer(ReduceTask.java:468)



Here is my updated run() method:

public int run(String[] args) throws Exception 
{
    Configuration conf = getConf();
    JobConf job = new JobConf(conf, DataJoin.class);
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 3) 
    {
      System.err.println("Usage: wordcount <in> <in1> <out>");
      System.exit(2);
    }

    Path in = new Path(args[0]);
    Path in1 = new Path(args[1]);
    Path out = new Path(args[2]);
    FileInputFormat.setInputPaths(job,in,in1);
    FileOutputFormat.setOutputPath(job, out);
    job.setJobName("DataJoin");
    job.setMapperClass(MapClass.class);
    job.setReducerClass(Reduce.class);
    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(TaggedWritable.class);
    job.set("mapred.textoutputformat.separator", ",");
    JobClient.runJob(job);
    return 0;

}

2 Answers:

Answer 0 (score: 6)

You need a default constructor for TaggedWritable (Hadoop uses reflection to create this object, and that requires a no-args constructor).

You also have a problem with your readFields method: you call data.readFields(in) through the Writable interface, but it has no knowledge of the actual runtime class of data.

I suggest you either write out the data class name before outputting the data object itself, or look at the GenericWritable class (you would need to extend it to define the set of allowable Writable classes that can be used; see the sketch after the code below).

So you could amend it as follows:

public static class TaggedWritable extends TaggedMapOutput {
    private Writable data;

    public TaggedWritable() {
        this.tag = new Text();
    }

    public TaggedWritable(Writable data) {
        this.tag = new Text("");
        this.data = data;
    }

    public Writable getData() {
        return data;
    }

    public void setData(Writable data) {
        this.data = data;
    }

    public void write(DataOutput out) throws IOException {
        this.tag.write(out);
        out.writeUTF(this.data.getClass().getName());
        this.data.write(out);
    }

    public void readFields(DataInput in) throws IOException {
        this.tag.readFields(in);
        String dataClz = in.readUTF();
        try {
            if (this.data == null
                    || !this.data.getClass().getName().equals(dataClz)) {
                // recreate data as the recorded runtime class (requires an
                // import of org.apache.hadoop.util.ReflectionUtils)
                this.data = (Writable) ReflectionUtils.newInstance(
                        Class.forName(dataClz), null);
            }
        } catch (ClassNotFoundException e) {
            // Class.forName throws a checked exception that readFields
            // cannot propagate, so rewrap it as an IOException
            throw new IOException(e);
        }
        this.data.readFields(in);
    }
}
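For the GenericWritable alternative mentioned above, here is a minimal, untested sketch (the class name JoinWritable is just for illustration; note that the DataJoin contrib framework itself expects a TaggedMapOutput, so this would need further adaptation):

// Sketch: org.apache.hadoop.io.GenericWritable handles the class
// bookkeeping itself; you only declare the set of allowed types.
public static class JoinWritable extends GenericWritable {
    private static final Class[] TYPES = { Text.class };

    // GenericWritable subclasses also need a no-args constructor
    public JoinWritable() {
    }

    public JoinWritable(Writable data) {
        set(data); // stores the instance and its type index
    }

    @Override
    protected Class[] getTypes() {
        return TYPES;
    }
}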

Answer 1 (score: 0)

Since your code only uses Text, chaining the default constructor in TaggedWritable should do it:

public TaggedWritable() {
    this(new Text(""));
}
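This works because data then always starts out as a Text instance, so the existing readFields can deserialize straight into it; there is no need to record the class name as in answer 0.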