hadoopmapreduce:未调用reduce方法

a5g8bdjr  于 2021-05-27  发布在  Hadoop
关注(0)|答案(2)|浏览(294)

我在写MapReduce算法。
在我的代码中 reduce(Text key, Iterable<String> values, Context context) 方法未调用。在它上面我有 @Override 给出错误: Method does not override method from its superclass .
这是我的密码:

package WordCountP;

import java.io.FileReader;
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;

public class popularity extends Configured implements Tool{

    public class PopularityMapper extends Mapper<Text, Text, Text, Text> {

        @Override
        protected void map(Text key, Text value,
                           Context context)
                throws IOException, InterruptedException {

                JSONParser jsonParser = new JSONParser();
                try {
                    JSONObject jsonobject = (JSONObject) jsonParser.parse(new FileReader("src\\testinput.json"));
                    JSONArray jsonArray = (JSONArray) jsonobject.get("votes");

                    Iterator<JSONObject> iterator = jsonArray.iterator();
                    while(iterator.hasNext()) {
                        JSONObject obj = iterator.next();
                        String song_id_rave_id = (String) obj.get("song_ID") + "|" + (String) obj.get("rave_ID");
                        String preference = (String) obj.get("preference");
                        System.out.println(song_id_rave_id + "||" + preference);
                        context.write(new Text(song_id_rave_id), new Text(preference));
                    }
                }catch(ParseException e) {
                    e.printStackTrace();
                }
        }

    }

    public class PopularityReducer extends Reducer<Text, Iterable<String>, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<String> values, Context context)
                throws IOException, InterruptedException {

            int sum = 0;
            for ( String val: values){
                if (val == "true"){
                    sum +=1;
                }
                else if (val == "false"){
                    sum -=1;
                }

            }
            String result = Integer.toString(sum);
            context.write(new Text(key), new Text(result));
        }
    }

    public static void main(String[] args) throws Exception{
        int exitCode = ToolRunner.run(new popularity(), args);
        System.exit(exitCode);
    }

    public int run(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.printf("Usage: %s [generic options] <input> <output>\n",
                    getClass().getSimpleName());
            ToolRunner.printGenericCommandUsage(System.err);
            return -1;
        }

        Job job = new org.apache.hadoop.mapreduce.Job();
        job.setJarByClass(popularity.class);
        job.setJobName("PopularityCounter");

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(PopularityMapper.class);
        job.setReducerClass(PopularityReducer.class);

        int returnValue = job.waitForCompletion(true) ? 0:1;
        System.out.println("job.isSuccessful " + job.isSuccessful());
        return returnValue;
    }
}

我试过用r大写来命名( Reduce() )但也没用。我假设给这个方法的参数有错误,但是我没有发现任何问题。。。有什么想法吗?
第二,有没有什么方法可以将输出格式设置为 .txt 文件?
仅供参考,我的输入json代码是

{"votes":[{
    "song_ID": "Piece of your heart",
    "mbr_ID": "001",
    "preference": "true",
    "timestamp": "11:22:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Piece of your heart",
    "mbr_ID": "002",
    "preference": "true",
    "timestamp": "11:22:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "001",
    "preference": "false",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "002",
    "preference": "false",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "003",
    "preference": "true",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    }]
}

提前谢谢!

xmq68pz9

xmq68pz91#

ioexception和interruptedexception都被检查,因此reduce方法不会被重写。
类reducer中的reduce方法不会引发任何异常,因此您不能声明子类中的reduce方法会引发任何选中的异常,但它可以引发未选中的异常。
您可能希望在reduce方法中处理异常。

aemubtdh

aemubtdh2#

最后我们选择了mongobd mapreduce。
hadoop太费劲了。
谢谢!

相关问题