Hadoop MapReduce fails with IndexOutOfBoundsException

aelbi1ox · posted 2021-05-27 · in Hadoop

I am writing a Hadoop MapReduce program to determine the mutual friends of two users, following this logic. Input to the map function:

A:B,C,D   (B, C and D are friends of A)
B:A,C,D,E
C:A,B,D,E
D:A,B,C,E
E:B,C,D

Here is the expected output:

AB:C,D (C and D are mutual friends of A and B)
AC:B,D
AD:B,C
BC:A,D,E
BD:A,C,E
BE:C,D
CD:A,B,E
CE:B,D
DE:B,C
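
The logic the two classes below implement is the usual mutual-friends pattern: for every friend on a user's line, the mapper emits a record keyed by the alphabetically sorted user pair, carrying the user's whole friend list as the value; the reducer then intersects the two lists that arrive under each pair key. For the first input line A:B,C,D the mapper emits:

AB -> B,C,D
AC -> B,C,D
AD -> B,C,D

B's line contributes the second record AB -> A,C,D,E, so the reducer sees the key AB with the two values B,C,D and A,C,D,E, whose intersection is C,D.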

When I run the MapReduce job in the terminal, I get an IndexOutOfBoundsException:

Error: java.lang.IndexOutOfBoundsException: Index: 1, Size: 1
    at java.util.ArrayList.rangeCheck(ArrayList.java:635)
    at java.util.ArrayList.get(ArrayList.java:411)
    at GrapheReduce.reduce(GrapheReduce.java:32)
    at GrapheReduce.reduce(GrapheReduce.java:1)
    at org.apache.hadoop.mapred.ReduceTask.runOldReducer(ReduceTask.java:444)
    at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:392)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:164)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1917)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)

Here is the Map class:

import java.io.IOException;
import java.util.Arrays;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.ToolRunner;

public class GrapheMap extends MapReduceBase implements
        Mapper<LongWritable, Text, Text, Text> {

    // map: tokenize one input line ("user:friend,friend,...") and emit the
    // initial key/value pairs (sorted user pair -> the user's friend list)
    public void map(LongWritable key, Text value,
            OutputCollector<Text, Text> output, Reporter reporter)
            throws IOException {

        String line = value.toString();
        String[] tokens = line.split(":");
        String user = tokens[0];
        String amis = tokens[1];
        StringTokenizer tokenizer = new StringTokenizer(amis, ",");

        // for every friend on this line, build the pair key and emit it
        // together with the user's whole friend list
        while (tokenizer.hasMoreTokens()) {
            String couple = user + tokenizer.nextToken();
            char[] tempArray = couple.toCharArray();
            Arrays.sort(tempArray); // normalize the key: "BA" -> "AB"
            couple = String.valueOf(tempArray);

            Text cle = new Text();
            cle.set(couple);
            Text friends = new Text();
            friends.set(amis);
            output.collect(cle, friends);
        }
    }
}
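
A side note on the mapper (unrelated to the crash): normalizing the pair key by character-sorting user + friend only works while user IDs are single characters, as in the sample data; with multi-character names the letters of both IDs get interleaved (Bob + Al sorts to ABblo). A name-safe sketch, in case longer IDs are ever needed (the helper name and separator are my choice, not part of the original code):

// order the two IDs lexicographically instead of sorting characters,
// so multi-character user names survive intact
static String pairKey(String a, String b) {
    return a.compareTo(b) <= 0 ? a + "," + b : b + "," + a;
}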

And the Reduce class:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;

import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;

public class GrapheReduce extends MapReduceBase implements
        Reducer<Text, Text, Text, Text> {

    /*
     * reduce receives the grouped key/value pairs from the mappers,
     * aggregates them by key and produces the final output
     */
    public void reduce(Text key, Iterator<Text> values,
            OutputCollector<Text, Text> output, Reporter reporter)
            throws IOException {
        String amicom = ": ";

        ArrayList<String> vals = new ArrayList<String>();
        while (values.hasNext()) {
            vals.add(values.next().toString());
        }

        char[] e1 = vals.get(0).toCharArray();
        for (int i = 0; i < e1.length; i++) {
            if (vals.get(1).indexOf(e1[i]) >= 0) {
                amicom += e1[i] + ", ";
            }

            String res = "";
            if (amicom == "") {
                res = "Pas d'amis en commun";
            } else {
                res = amicom;
            }

            output.collect(key, new Text(res));
        }
    }
}
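
Reading the stack trace against this class: "Index: 1, Size: 1" from ArrayList.get can only come from the vals.get(1) call, which means some reduce key received exactly one value instead of the expected two. The sample input above is perfectly symmetric, so this presumably happens on the real input whenever a friendship is listed in only one direction (X lists Y, but Y does not list X), or when a blank or malformed line slips through. Two smaller problems compound it: amicom == "" compares object references rather than contents (and amicom starts as ": ", so it is never empty anyway), and the closing brace of the for loop sits after output.collect, so the reducer emits one line per character instead of one line per key. Below is a minimal guarded sketch of the reduce body (my rewrite, not the original; it still assumes single-character user IDs, as in the sample data):

public void reduce(Text key, Iterator<Text> values,
        OutputCollector<Text, Text> output, Reporter reporter)
        throws IOException {
    // collect the (normally two) friend lists for this pair key
    ArrayList<String> vals = new ArrayList<String>();
    while (values.hasNext()) {
        vals.add(values.next().toString());
    }

    // guard: a pair key can arrive with a single value when the input
    // lists are not symmetric -- report it instead of crashing
    if (vals.size() < 2) {
        output.collect(key, new Text("Pas d'amis en commun"));
        return;
    }

    // intersect the two lists character by character, skipping commas
    StringBuilder amicom = new StringBuilder();
    for (char c : vals.get(0).toCharArray()) {
        if (c != ',' && vals.get(1).indexOf(c) >= 0) {
            amicom.append(c).append(',');
        }
    }

    // emit once per key, after the whole intersection is built
    String res = (amicom.length() == 0)
            ? "Pas d'amis en commun"
            : amicom.substring(0, amicom.length() - 1);
    output.collect(key, new Text(res));
}

With the guard in place, asymmetric pairs are reported instead of killing the task; alternatively they could be counted via reporter.incrCounter and skipped silently.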

And the test (driver) class:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

public class GrapheTest extends Configured implements Tool {
    public int run(String[] args) throws Exception {
        // create a JobConf object and assign a job name for identification
        JobConf conf = new JobConf(getConf(), GrapheTest.class);
        conf.setJobName("Graphe");

        // set the data types of the output key and value
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        // provide the mapper and reducer class names
        conf.setMapperClass(GrapheMap.class);
        conf.setReducerClass(GrapheReduce.class);

        // the HDFS input and output directories, taken from the command line
        FileInputFormat.addInputPath(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new GrapheTest(), args);
        System.exit(res);
    }
}
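
For completeness: packaged into a jar, a driver like this is typically launched as

hadoop jar graphe.jar GrapheTest /user/me/input /user/me/output

where the jar name and the two HDFS paths are placeholders. Note that the output directory must not exist beforehand; the old mapred API fails the job at submission time if it does.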

No answers yet.
