Hadoop CustomInputFormat not being called

Date: 2013-12-06 07:29:07

Tags: hadoop mapreduce

I wrote a custom input format and configured it in the job, but the input format is never invoked. I added some System.out.println statements (SOPs) so I could see it run, but none of them print. Even when I comment out the custom input format in the driver class, the output stays the same. What am I missing?

DRIVER CLASS

public class TestDriver {

    public static void main(String args[]) throws IOException, InterruptedException, ClassNotFoundException{

        Configuration conf = new Configuration();
        Job job = new Job(conf,"Custom Format");
        job.setMapperClass(CustomInputFormatmapper.class);
        job.setReducerClass(CustomInputFormatReducer.class);
        job.setInputFormatClass(CustomInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.getConfiguration().set("fs.file.impl", "com.learn.WinLocalFileSystem");
        String inputPath="In\\VISA_Details.csv";
        Path inPath=new Path(inputPath);
        String outputPath = "C:\\Users\\Desktop\\Hadoop learning\\output\\run1";
        Path outPath=new Path(outputPath);

        FileInputFormat.setInputPaths(job, inPath );
        FileOutputFormat.setOutputPath(job, outPath);

        System.out.println(job.waitForCompletion(true));


    }
}

CUSTOM INPUTFORMAT

import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class CustomInputFormat extends TextInputFormat{

    public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context)
    {
        System.out.println(" ------------ INSIDE createRecordReader()--------------");
        return new CustomRecordReader();
    }
}

CUSTOM RECORDREADER

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.LineReader;

public class CustomRecordReader extends RecordReader {

    private CompressionCodecFactory compressionCodecs;
    private final int NLINESTOPROCESS = 3;
    private long start;
    private long pos;
    private long end;
    private LineReader in;
    private int maxLineLength;
    private LongWritable key;
    private Text value;

    @Override
    public void close() throws IOException {
        // TODO Auto-generated method stub

    }

    @Override
    public Object getCurrentKey() throws IOException, InterruptedException {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public Object getCurrentValue() throws IOException, InterruptedException {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        // TODO Auto-generated method stub
        return 0;
    }

    @Override
    public void initialize(InputSplit inputsplit,TaskAttemptContext taskattemptcontext) 
            throws IOException, InterruptedException {
        System.out.println(" ---------- INSIDE INITILISE:  THIS IS NOT PRINTING----------");
        FileSplit split = (FileSplit)inputsplit;
        Configuration job = taskattemptcontext.getConfiguration();
        maxLineLength = job.getInt("mapred.linerecordreader.maxlength", 2147483647);
        start = split.getStart();
        end = start + split.getLength();
        Path file = split.getPath();
        compressionCodecs = new CompressionCodecFactory(job);
        CompressionCodec codec = compressionCodecs.getCodec(file);
        FileSystem fs = file.getFileSystem(job);
        FSDataInputStream fileIn = fs.open(split.getPath());
        boolean skipFirstLine = false;
        if(codec != null)
        {
            in = new LineReader(codec.createInputStream(fileIn), job);
            end = 9223372036854775807L;
        } else
        {
            if(start != 0L)
            {
                skipFirstLine = true;
                start--;
                fileIn.seek(start);
            }
            in = new LineReader(fileIn, job);
        }
        if(skipFirstLine)
            start += in.readLine(new Text(), 0, (int)Math.min(2147483647L, end - start));
        pos = start;

    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {

        System.out.println(" ---------- INSIDE nextKeyValue()------------");
        if(key==null){
            key = new LongWritable();
        }
        if(value==null){
            value = new Text();
        }
        key.set(pos);
        value.clear();

        final Text newLine = new Text("\n");
        Text newVal = new Text();
         int newSize = 0;

        for(int i =0;i<NLINESTOPROCESS;i++){
             Text v = new Text();

             while(pos<end){
                 newSize = in.readLine(v, maxLineLength,Math.max((int)Math.min(Integer.MAX_VALUE, end-pos),maxLineLength));
                 value.append(v.getBytes(),0, v.getLength());
                 value.append(newLine.getBytes(),0, newLine.getLength());

                 if (newSize == 0) {
                        break;
                    }
                    pos += newSize;
                    if (newSize < maxLineLength) {
                        break;
                    }

             }
        }


        return false;
    }

}

MAPPER CLASS

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CustomInputFormatmapper extends Mapper<LongWritable, Text, LongWritable, LongWritable> {

    public void map(LongWritable key, Text val, Context context)throws IOException, InterruptedException{

        String value = val.toString();
        String[] totalRows = value.split("\n");
        int count =totalRows.length;

        context.write(new LongWritable(Long.valueOf(count)), new LongWritable(1L));

    }
}

REDUCER CLASS

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class CustomInputFormatReducer extends Reducer<LongWritable, LongWritable, LongWritable, LongWritable> {

    public void reduce(LongWritable key, Iterable<LongWritable> val, Context context) throws IOException, InterruptedException{
        System.out.println(" --------REDUCER--------");
        long count =0;
        for(LongWritable vals: val){
            count++;
        }
        context.write(key, new LongWritable(count));
    }

}

1 Answer:

Answer 0 (score: 0):

I am answering my own question, since it may help others who run into the same problem. The issue was with the packages I imported. The mistakes I made are listed below.

CUSTOMINPUTFORMAT CLASS

1) Missed the @Override annotation.
2) Imported from org.apache.hadoop.mapred instead of org.apache.hadoop.mapreduce (in the posted class it is TaskAttemptContext that comes from the old mapred package), so createRecordReader() never actually overrode the method in TextInputFormat and the default reader was used. A corrected sketch is shown below.
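
For reference, a minimal sketch of how the corrected class could look, assuming CustomRecordReader is also changed to extend RecordReader<LongWritable, Text> (see the next section). With every type taken from the new org.apache.hadoop.mapreduce API, @Override compiles and the framework picks up the custom reader:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext; // new API, not org.apache.hadoop.mapred.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class CustomInputFormat extends TextInputFormat {

    // With the mapreduce TaskAttemptContext in the signature this really
    // overrides TextInputFormat.createRecordReader(); with the mapred one it
    // silently became an overload, which is why the reader was never called.
    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
        System.out.println(" ------------ INSIDE createRecordReader()--------------");
        return new CustomRecordReader();
    }
}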

CUSTOMRECORDREADER

1) The imports were made from org.apache.hadoop.mapred.* instead of org.apache.hadoop.mapreduce.*; see the import sketch below.
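
As a sketch, the corrected import section for CustomRecordReader could look like the following, with everything taken from the new mapreduce API (the class body from the question stays the same apart from the type parameters):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit; // new API, not org.apache.hadoop.mapred.FileSplit
import org.apache.hadoop.util.LineReader;

// The class declaration should also carry the key/value types:
//     public class CustomRecordReader extends RecordReader<LongWritable, Text> { ... }
// so that getCurrentKey() and getCurrentValue() can return LongWritable and Text
// instead of Object, and the (FileSplit) cast in initialize() then refers to the
// mapreduce FileSplit.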
