java map reduce中的select distinct查询

drnojrws  于 2021-05-30  发布在  Hadoop
关注(0)|答案(3)|浏览(438)
10001|76884|1995-06-24|1996-06-23
10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

我的目标是删除dup值,输出如下

10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

我写了一个代码如下

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;

import org.apache.hadoop.conf.*;

import org.apache.hadoop.io.*;

import org.apache.hadoop.mapred.JobClient;

import org.apache.hadoop.mapreduce.*;

import org.apache.hadoop.mapreduce.Mapper.Context;

import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class charterSelDistRec {

        public static class Map extends Mapper <LongWritable, Text, Text, Text> {
            private String tableKey,tableValue;

            public void map(LongWritable key, Text value, Context context)
 throws IOException, InterruptedException {

                    String line = value.toString();
                    String splitarray[] = line.split("\\|",2);
                    tableKey = splitarray[0].trim();
                    tableValue = splitarray[1].trim();

                    context.write(new Text(tableKey), new Text(tableValue));     
                }
        }               

        public static class Reduce extends Reducer <Text, Text, Text, Text> {                         
            public void reduce(Text key, Iterator<Text> values, Context context) 
                      throws IOException, InterruptedException {
                    String ColumnDelim="";
                    String tableOutValue=ColumnDelim+values;
                    context.write(new Text(key), new Text(tableOutValue));

                }
        }

        public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                Job job = new Job(conf,"charterSelDistRec");
                job.getConfiguration().set("mapreduce.job.queuename", "root.Dev");
                job.getConfiguration().set("mapreduce.output.textoutputformat.separator","|");
                job.setJobName("work_charter_stb.ext_chtr_vod_fyi_mapped");
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(Text.class);

                job.setMapperClass(Map.class);

                job.setReducerClass(Reduce.class);

                job.setInputFormatClass(TextInputFormat.class);
                job.setOutputFormatClass(TextOutputFormat.class);

                FileInputFormat.addInputPath(job, new Path(args[0]));
                FileOutputFormat.setOutputPath(job, new Path(args[1]));
                job.setJarByClass(charterSelDistRec.class); 
                job.waitForCompletion(true);
          }
      }

但是输出文件仍然有dup。请务必告诉我哪里错了。

bn31dyow

bn31dyow1#

试试这个。这样做的目的是只发出iterable的第一个值,因为它们都是相同的,并且您希望删除dup值。

import java.io.IOException;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class charterSelDistRec {

    /** Emits the whole input line as both key and value; grouping by key collapses duplicates. */
    public  static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {

        @Override
        public void map(LongWritable ignore, Text value, Context context)
            throws IOException, InterruptedException {
            context.write(value, value);
        }  
    }

    /** Writes only the first value of each group — all values grouped under one key are identical. */
    public static class MyReducer extends Reducer<Text, Text, Text, NullWritable> {    
      @Override
      public void reduce(Text key, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
          for (Text value : values){
              context.write(value, NullWritable.get());
              break; // one copy per distinct line is enough
          }
      }
    }       

  /** Configures and submits the job; args[0] = input path, args[1] = output path. */
  public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Job job = new Job(conf,"charterSelDistRec");
      job.getConfiguration().set("mapreduce.job.queuename", "root.Dev");
      job.getConfiguration().set("mapreduce.output.textoutputformat.separator","|");
      job.setJobName("work_charter_stb.ext_chtr_vod_fyi_mapped");
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);

      // BUG FIX: was Map.class / Reduce.class, which do not exist in this class
      // (with the java.util.* import, Map.class is java.util.Map — a compile error).
      job.setMapperClass(MyMapper.class);
      job.setReducerClass(MyReducer.class);

      // BUG FIX: the map emits (Text, Text) while the job output is (Text, NullWritable);
      // when they differ, the intermediate types must be declared explicitly or the
      // shuffle fails with a type-mismatch error at runtime.
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);

      job.setInputFormatClass(TextInputFormat.class);
      job.setOutputFormatClass(TextOutputFormat.class);

      FileInputFormat.addInputPath(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      job.setJarByClass(charterSelDistRec.class); 
      job.waitForCompletion(true);
   }
}
eh57zj3b

eh57zj3b2#

注意你示例中的三行里,前两行是同一条记录的重复,第三行是另一条记录。这个回答假设每条记录独占一行(行与行之间以换行分隔):只要输入是这种格式,在 map 中读取每一行后按 | 拆分即可,map reduce 在 shuffle 阶段会按键分组,reducer 收到的键天然就是唯一的。请先核实你的实际数据确实是一行一条记录。
如果你的输入格式不同(比如同一行中包含两条以上的记录),就需要考虑使用其他的输入格式(InputFormat),或者以不同的方式处理拆分逻辑。多了解 map reduce 的工作原理及其接受的输入格式会对你更有帮助。快乐学习!

ubbxdtey

ubbxdtey3#

不必这么复杂。你所要做的就是:
在mapper中,将每一行作为键和任何值发出
在reducer中,只需发出键并忽略值。
共享代码:
以下是输入:

10001|76884|1995-06-24|1996-06-23
10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

代码如下:

public class StackRemoveDup {

    /** Maps every input line to (line, NULL); identical lines collapse into one key group. */
    public  static class MyMapper extends Mapper<LongWritable,Text, Text, NullWritable> {

        @Override
        public void map(LongWritable offset, Text line, Context context)
            throws java.io.IOException, InterruptedException {
            context.write(line, NullWritable.get());
        }  
    }

    /** Receives one group per distinct line; writing the key once yields the distinct set. */
    public static class MyReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

      @Override
      public void reduce(Text line, Iterable<NullWritable> ignored, Context context)
          throws IOException, InterruptedException {
        context.write(line, NullWritable.get());
      }
    }       

  /** Wires up the distinct job; args[0] = input path, args[1] = output path. */
  public static void main(String[] args) 
                  throws IOException, ClassNotFoundException, InterruptedException {

    Job job = new Job();
    job.setJobName("StackRemoveDup");
    job.setJarByClass(StackRemoveDup.class);

    job.setMapperClass(MyMapper.class);
    job.setReducerClass(MyReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.waitForCompletion(true);
  }
}

以下是输出:

10001|75286|1993-06-24|1994-06-24
10001|76884|1995-06-24|1996-06-23

相关问题