Reducer writes mapper output to the output file

toe95027 posted on 2021-06-02 in Hadoop

I am learning Hadoop and trying to run a MapReduce program. All the map and reduce tasks complete fine, but the reducer writes the mapper output to the output file unchanged, which means the reduce function is not being called at all. My sample input is as follows:

1,a
1,b
1,c
2,s
2,d

The expected output is as follows:

1 a,b,c
2 s,d

Below is my program:

package patentcitation;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MyJob
{
        public static class Mymapper extends Mapper <Text, Text, Text, Text>
        {
                public void map (Text key, Text value, Context context) throws IOException, InterruptedException
                {
                        context.write(key, value);
                }

        }
        public static class Myreducer extends Reducer<Text,Text,Text,Text>
        {

                StringBuilder str = new StringBuilder();

                public void reduce(Text key, Iterable<Text> value, Context context) throws IOException, InterruptedException
                {
                        for(Text x : value)
                        {
                                if(str.length() > 0)
                                {
                                        str.append(",");
                                }
                                str.append(x.toString());
                        }
                        context.write(key, new Text(str.toString()));
                }

        }
        public static void main(String args[]) throws IOException, ClassNotFoundException, InterruptedException
        {
                Configuration conf = new Configuration();
                Job job = Job.getInstance(conf, "PatentCitation");
                FileSystem fs = FileSystem.get(conf);
                job.setJarByClass(MyJob.class);
                FileInputFormat.addInputPath(job,new Path(args[0]));
                FileOutputFormat.setOutputPath(job, new Path(args[1]));
                job.setMapperClass(Mymapper.class);
                job.setReducerClass(Myreducer.class);
                job.setMapOutputKeyClass(Text.class);
                job.setMapOutputValueClass(Text.class);
                job.setInputFormatClass(KeyValueTextInputFormat.class);
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(Text.class);
                conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator",",");
                if(fs.exists(new Path(args[1]))){
                   // If it exists, delete the output path
                   fs.delete(new Path(args[1]),true);
                }
                System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
}

The same problem was asked in an earlier thread, and I used the Iterable value in my reduce function as the answer there suggests, but that did not solve the problem. I cannot comment on that thread because my reputation score is too low, so I created a new one.
Please help me figure out where I went wrong.

kqhtkvqz's answer:

There are a couple of mistakes in your program:

1. In the driver, you set the key/value separator after instantiating the Job class:

   conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");

   Job.getInstance(conf, ...) takes a copy of the Configuration, so a property set on conf after that point never reaches the job. KeyValueTextInputFormat therefore falls back to its default separator (a tab); since your lines contain no tab, each whole line becomes the key with an empty value, and the output looks exactly like the mapper input. Move the conf.set(...) call before Job.getInstance(...).

2. In the reducer, the StringBuilder should be declared inside the reduce() function. As an instance field it is never cleared, so the values of earlier keys would be prepended to the output of later keys.
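To see the first point in isolation: Job.getInstance copies the Configuration it is given, so later conf.set(...) calls only affect the original object. A minimal sketch demonstrating this (my.example.prop is just a placeholder property name):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class ConfCopyDemo
{
    public static void main(String[] args) throws IOException
    {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "demo");  // the job snapshots conf at this point
        conf.set("my.example.prop", "x");         // too late: only the original conf sees it

        System.out.println(job.getConfiguration().get("my.example.prop")); // prints: null
        System.out.println(conf.get("my.example.prop"));                   // prints: x
    }
}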
I modified your code as shown below and got this output:

E:\hdp\hadoop-2.7.1.2.3.0.0-2557\bin>hadoop fs -cat /out/part-r-00000
1       c,b,a
2       d,s

Note that MapReduce does not guarantee the order of values within a key, which is why 1 yields c,b,a rather than the a,b,c you expected; a secondary sort would be needed for a deterministic order.

Modified code:

package patentcitation;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class MyJob
{
    public static class Mymapper extends Mapper <Text, Text, Text, Text>
    {
        public void map(Text key, Text value, Context context) throws IOException, InterruptedException
        {
                context.write(key, value);
        }

    }
    public static class Myreducer extends Reducer<Text,Text,Text,Text>
    {

        public void reduce(Text key, Iterable<Text> value, Context context) throws IOException, InterruptedException
        {
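            // Fix 2: create the StringBuilder inside reduce() so values from one key do not leak into the next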
            StringBuilder str = new StringBuilder();

            for(Text x : value)
            {
                if(str.length() > 0)
                {
                    str.append(",");
                }
                str.append(x.toString());
            }
            context.write(key, new Text(str.toString()));
        }

    }
    public static void main(String args[]) throws IOException, ClassNotFoundException, InterruptedException
    {
        Configuration conf = new Configuration();
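        // Fix 1: set the separator before Job.getInstance(conf, ...), which takes a copy of the configuration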
        conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator",",");
        Job job = Job.getInstance(conf, "PatentCitation");
        FileSystem fs = FileSystem.get(conf);
        job.setJarByClass(MyJob.class);
        FileInputFormat.addInputPath(job,new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setMapperClass(Mymapper.class);
        job.setReducerClass(Myreducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(KeyValueTextInputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        /*if(fs.exists(new Path(args[1]))){
            // If it exists, delete the output path
            fs.delete(new Path(args[1]),true);
        }*/
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
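If you want to keep the original behavior of clearing a pre-existing output directory, the commented-out block can simply be restored; it only needs to run before waitForCompletion, for example:

Path out = new Path(args[1]);
if (fs.exists(out))
{
    // Delete the old output directory, otherwise the job fails with FileAlreadyExistsException
    fs.delete(out, true);
}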
