Conditional selection after GroupBy

Date: 2016-01-20 11:13:49

Tags: c# linq lambda

How do I write a conditional Where clause to select items after a GroupBy?

I have a list of objects of type Option:

class Option {
    public Boolean Important { get; set; }
    public int Priority { get; set; }
    public String Value { get; set; }
    public String Name { get; set; }
}

I initialize my options and a list containing all of them:

List<Option> myOptions = new List<Option> {
    new Option { Priority = 100, Name = "Color", Value = "Red", Important = true },
    new Option { Priority = 150, Name = "Color", Value = "Blue" },
    new Option { Priority = 100, Name = "Font", Value = "16" },
    new Option { Priority = 150, Name = "Font", Value = "32"
};

So I have something like this:

///MY Options///
/// NAME  --- VALUE --- PRIORITY --- IMPORTANT
/// Color --- Red   --- 100      --- True
/// Color --- Blue  --- 150      --- False
/// Font  --- 16    --- 100      --- False
/// Font  --- 32    --- 150      --- False
///

I need to group them by Name and pick the values according to two rules:

  1. Pick the highest-priority item whose Important is true.
  2. If there is no Important item, pick the highest-priority one.

So the result should be:

        ///Color - Red  //Because it has the highest priority among the Important items
        ///Font - 32    //Because it has the highest priority and there is no Important item
    

I'm trying to avoid iterating multiple times, so I'm building a single LINQ query... without success:

    myOptions.GroupBy(op => op.Name)
        .Select(g => new {
            Name = g.Key,
            Value = g.Where(op => op.Important).OrderByDescending(op => op.Priority).First().Value
        }).ToList();
    

I can't figure out how to fix the Where. Any ideas?

1 answer:

Answer 0 (score: 7)

It sounds like you don't really want to filter by Important, but rather order by it:

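A minimal sketch of that idea (assuming an anonymous Name/Value pair is all you need): within each group, order by Important descending so that true sorts first, then by Priority descending, and take the first element. Groups with no Important item then fall back naturally to their highest-priority option.

    var results = myOptions
        .GroupBy(op => op.Name)
        .Select(g => g.OrderByDescending(op => op.Important)  // true sorts before false
                      .ThenByDescending(op => op.Priority)    // then highest priority first
                      .First())
        .Select(op => new { op.Name, op.Value })
        .ToList();

    // results: { Name = Color, Value = Red }, { Name = Font, Value = 32 }

Unlike the Where in the question, First() can never throw here, because every group contains at least one element.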