Sorting with Hadoop MapReduce

Asked by zzoitvuj on 2021-06-02 in Hadoop

I have the following algorithm for sorting data alphabetically:

  public static class LineMapper
       extends Mapper<Object, Text, Text, NullWritable> {
    private Configuration conf;
    private boolean caseSensitive;
    private Text word = new Text();

    @Override
    public void setup(Context context) throws IOException,
        InterruptedException {
      conf = context.getConfiguration();
      caseSensitive = conf.getBoolean("amasort.case.sensitive", true);
    }

    @Override
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
      // Append the input key (the line's byte offset) so duplicate lines stay distinct.
      word.set(line + "_" + key.toString());
      context.write(word, NullWritable.get());
      System.out.println("key:" + key.toString() + ";value:" + value.toString());
    }
  }

  public static class ForwardReducer
       extends Reducer<Text,NullWritable,Text,NullWritable> {
    private NullWritable result = NullWritable.get();

    @Override
    public void reduce(Text key, Iterable<NullWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
      // Strip the "_<offset>" suffix that the mapper appended.
      String originalWord = key.toString();
      originalWord = originalWord.substring(0, originalWord.lastIndexOf("_"));
      key.set(originalWord);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
    String[] remainingArgs = optionParser.getRemainingArgs();
    Job job = Job.getInstance(conf, "word sort");
    job.setJarByClass(AmaSort.class);
    job.setMapperClass(LineMapper.class);
//    job.setCombinerClass(ForwardReducer.class);
    job.setReducerClass(ForwardReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

I tried this algorithm to sort my dataset, whose lines look like @x,0,tcp,xx,1,1,1,2,4,5,…, but the output lines that begin with @ are dropped, and the structure of the remaining data lines (0,tcp,x1x1x1,1114,…) gets modified. I only want to sort the dataset on this specific character (@), so that all lines beginning with @ come first and every line keeps its original structure. Can anyone help me modify this algorithm?
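
A minimal sketch of one way to express that ordering, assuming a one-character synthetic flag ("0" or "1") may be prepended to the map output key and stripped again in reduce; the PrefixSortMapper/PrefixSortReducer names and the flag scheme are illustrative assumptions, not from the post, and the classes would sit in the same job class alongside the existing ones:

  public static class PrefixSortMapper
       extends Mapper<Object, Text, Text, NullWritable> {
    private Text sortKey = new Text();

    @Override
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      String line = value.toString();
      // "0" sorts before "1", so lines starting with '@' group at the top;
      // the original line is carried unchanged after the flag.
      sortKey.set((line.startsWith("@") ? "0" : "1") + line);
      context.write(sortKey, NullWritable.get());
    }
  }

  public static class PrefixSortReducer
       extends Reducer<Text, NullWritable, Text, NullWritable> {
    @Override
    public void reduce(Text key, Iterable<NullWritable> values,
                       Context context) throws IOException, InterruptedException {
      // Strip the one-character flag so every line keeps its structure.
      // Note: identical input lines collapse into a single output line here.
      context.write(new Text(key.toString().substring(1)), NullWritable.get());
    }
  }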

xe55xuns #1

You can use the modified code below to perform the sort:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class AmaSort
{
    static Configuration conf = null;
    private static boolean caseSensitive;
    private static Text word = new Text();

    public static class LineMapper extends Mapper<Object, Text, Text, NullWritable>{

        public void setup(Context context) throws IOException, InterruptedException
        {
            conf = context.getConfiguration();
            caseSensitive = conf.getBoolean("amasort.case.sensitive", true);

        }

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
            // Emit the whole line as the key; the shuffle sorts keys, so the
            // reducer receives the lines in lexicographic order.
            word.set(line);
            context.write(word, NullWritable.get());
        }
    }

    public static class ForwardReducer extends Reducer<Text, NullWritable, Text, NullWritable>
    {
        private NullWritable result = NullWritable.get();

        public void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException
        {
            // Called once per distinct key, so duplicate input lines collapse
            // into a single output line.
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
        String[] remainingArgs = optionParser.getRemainingArgs();
        Job job = Job.getInstance(conf, "word sort"); // preferred over the deprecated new Job(conf, ...)
        job.setJarByClass(AmaSort.class);
        job.setMapperClass(LineMapper.class);
        // job.setCombinerClass(ForwardReducer.class);
        job.setReducerClass(ForwardReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
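
Note that this code sorts every line lexicographically by its full content; since '@' (0x40) sorts after the digits '0'-'9' (0x30-0x39) in ASCII, lines beginning with @ will come after purely numeric lines rather than first. To get the @-first ordering asked for in the question, the job can be pointed at the sketch classes above instead, i.e. job.setMapperClass(PrefixSortMapper.class) and job.setReducerClass(PrefixSortReducer.class). Case sensitivity can also be toggled at launch time, because GenericOptionsParser understands -D properties, e.g. (jar name and paths are placeholders): hadoop jar amasort.jar AmaSort -Damasort.case.sensitive=false /input /output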
