How do I initialize instance variables in setup()?

Time: 2014-01-21 04:02:54

Tags: hadoop

This should be a simple problem, but I am struggling with it. Everything in my code works except for initializing the values of "train_rows" and "cols", which are read in from the configuration.

I set up logging to print the values of "train_rows" and "cols" inside the setup() method, and the values are correct there. However, when I try the same thing in the map() method, both values show up as 0. What am I doing wrong?

import java.io.File;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.Scanner;
import org.apache.log4j.Logger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class KNNMapper extends Mapper<LongWritable, Text, IntWritable, IntWritable> {
  private static final Logger sLogger = Logger.getLogger(KNNMapper.class);
  private int[][] train_vals;
  private int[] train_label_vals;
  private int train_rows;
  private int test_rows;
  private int cols;

  @Override
  public void setup(Context context) throws IOException, InterruptedException {
      Configuration conf = context.getConfiguration();

      train_rows = conf.getInt("rows", -1);
      cols = conf.getInt("columns", -1);

      //just changed this
      //int[][] train_vals = new int[train_rows][cols];
      //int[] train_label_vals = new int[train_rows];

      train_vals = new int[train_rows][cols];
      train_label_vals = new int[train_rows];

      // read train csv, parse, and store into 2d int array
      Scanner myScan;
        try {
            File trainfile = new File("train_sample.csv");
            if (!trainfile.exists()) {
                throw new IllegalArgumentException("train file didn't load");
            }
            myScan = new Scanner(trainfile);

            //Set the delimiter used in file
            myScan.useDelimiter("[,\r\n]+");

            //Get all tokens and store them in some data structure
            //I am just printing them

            for(int row = 0; row < train_rows; row++) {
                for(int col = 0; col < cols; col++) {
                    train_vals[row][col] = Integer.parseInt(myScan.next().toString());
                }
            }

            myScan.close();

        } catch (FileNotFoundException e) {
            System.out.print("Error: Train file execution did not work.");
        }

    // read train_labels csv, parse, and store into 2d int array
        try {
            File trainlabels = new File("train_labels.csv");
            if (!trainlabels.exists()) {
                throw new IllegalArgumentException("train labels didn't load");
            }

            myScan = new Scanner(trainlabels);

            //Set the delimiter used in file
            myScan.useDelimiter("[,\r\n]+");

            //Get all tokens and store them in some data structure
            //I am just printing them

            for(int row = 0; row < train_rows; row++) {
                    train_label_vals[row] = Integer.parseInt(myScan.next().toString());
                    if(row < 10) {
                        System.out.println(train_label_vals[row]);
                    }
            }

            myScan.close();

        } catch (FileNotFoundException e) {
            System.out.print("Error: Train Labels file not found.");
        }
  }

  @Override
  public void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {

        // setup() gave us train_vals & train_label_vals.
        // Each line in map() represents a test observation.  We iterate 
        // through every train_val row to find nearest L2 match, then
        // return a key/value pair of <observation #, closest training label>.

        // convert from Text to String

        System.out.println("I'm in the map!");
        String line = value.toString();
        double distance;
        double best_distance = Double.POSITIVE_INFINITY;
        int col_num;

        int best_digit = -1;
        IntWritable rowId = null;
        int i;
        IntWritable rowNum;
        String[] pixels;

        System.out.println("Number of train rows:" + train_rows);
        System.out.println("Number of columns:" + cols);
        // comma delimited files, split on commas
        // first we find the # of rows

        pixels = line.split(",");
        rowId = new IntWritable(Integer.parseInt(pixels[0]));
        System.out.println("working on row " + rowId);
        best_distance = Double.POSITIVE_INFINITY;

        for (i = 0; i < train_rows; i++) {
            distance = 0.0;

            col_num = 0;

            for (int j = 1; j < cols; j++) {
                // '^' is bitwise XOR in Java, not exponentiation; square via multiplication
                int diff = Integer.parseInt(pixels[j]) - train_vals[i][j-1];
                distance += diff * diff;
            }

            if (distance < best_distance) {
                best_distance = distance;
                best_digit = train_label_vals[i];
            }
        }
        System.out.println("And we're out of the loop baby yeah!");
        context.write(rowId, new IntWritable(best_digit));
        System.out.println("Mapper done!");
  }
}
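
(For context, the driver is not shown in the question. The "rows" and "columns" values are expected to be set on the Configuration before the Job is created, roughly as in the sketch below; the class name, input/output handling, and the example values are assumptions, not part of the original code.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class KNNDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // keys must match what the mapper reads in setup()
    conf.setInt("rows", 42000);      // example value
    conf.setInt("columns", 784);     // example value

    // Job copies the Configuration here, so setInt() calls made after this point
    // never reach the mappers, and getInt() in setup() falls back to its default.
    Job job = Job.getInstance(conf, "knn");
    job.setJarByClass(KNNDriver.class);
    job.setMapperClass(KNNMapper.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}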

1 Answer:

Answer 0 (score: 0)

I suspect this is the problem, assuming the file you are trying to read lives in HDFS.

You used:

import java.io.File;
File trainfile = new File("train_sample.csv");

In Hadoop, this is how we check for a file in HDFS:

try {
    FileSystem fs = FileSystem.get(context.getConfiguration());

    if (fs.exists(new Path("/user/username/path/of/file/inhdfs"))) {
        System.out.println("File exists");
    }
} catch (IOException e) {
    e.printStackTrace();
}
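
To actually load the training data inside setup(), the file can be opened through the same FileSystem API instead of java.io.File, which only sees the task's local working directory. A rough sketch follows (the HDFS path is an assumption, and train_labels.csv would be handled the same way):

// additional imports needed in KNNMapper:
// import java.io.BufferedReader;
// import java.io.InputStreamReader;
// import org.apache.hadoop.fs.FSDataInputStream;
// import org.apache.hadoop.fs.FileSystem;
// import org.apache.hadoop.fs.Path;

@Override
public void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    train_rows = conf.getInt("rows", -1);
    cols = conf.getInt("columns", -1);
    train_vals = new int[train_rows][cols];
    train_label_vals = new int[train_rows];

    FileSystem fs = FileSystem.get(conf);
    Path trainPath = new Path("/user/username/train_sample.csv");  // assumed HDFS path

    try (FSDataInputStream in = fs.open(trainPath);
         BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
        // one CSV line per training row; parse each value into the 2D array
        for (int row = 0; row < train_rows; row++) {
            String[] tokens = reader.readLine().split(",");
            for (int col = 0; col < cols; col++) {
                train_vals[row][col] = Integer.parseInt(tokens[col].trim());
            }
        }
    }
    // train_labels.csv would be read the same way into train_label_vals.
}

Alternatively, the CSV files could be shipped to each task through the distributed cache (job.addCacheFile(...)), in which case opening them locally with java.io.File by their link names can work.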