Error: cannot access org.apache.hadoop.mapred.MapReduceBase

3phpmpom · posted 2021-06-03 in Hadoop

I wrote this Java Hadoop program, which performs parallel indexing of files:

package org.myorg;

import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

public class ParallelIndexation {

public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> { 
     private final static IntWritable zero = new IntWritable(0); 
     private Text word = new Text();
     public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException { 
        String line = value.toString();
        int CountComputers;
        //DataInputStream ConfigFile = new DataInputStream( new FileInputStream("countcomputers.txt"));
        FileInputStream fstream = new FileInputStream("/usr/countcomputers.txt"); // path to the config file
        DataInputStream in = new DataInputStream(fstream);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String result = br.readLine(); // read the first line as a string
        CountComputers = Integer.parseInt(result); // parse the string into a number
        //CountComputers=ConfigFile.readInt();
        in.close();
        fstream.close();
        ArrayList<String> paths = new ArrayList<String>();
        StringTokenizer tokenizer = new StringTokenizer(line, "\n");
        while (tokenizer.hasMoreTokens()) 
        {
          paths.add(tokenizer.nextToken());
        }
        // split the list of paths into CountComputers roughly equal groups,
        // joining each group into a single newline-separated string
        String[] ConcatPaths= new String[CountComputers];
        int NumberOfElementConcatPaths=0;
        if (paths.size()%CountComputers==0)
        {
            for (int i=0; i<CountComputers; i++)
            {
                ConcatPaths[i]=paths.get(NumberOfElementConcatPaths);
                NumberOfElementConcatPaths+=paths.size()/CountComputers;
                for (int j=1; j<paths.size()/CountComputers; j++)
                {
                    ConcatPaths[i]+="\n"+paths.get(i*paths.size()/CountComputers+j);
                }
            }
        }
        else
        {
            NumberOfElementConcatPaths=0;
            for (int i=0; i<paths.size()%CountComputers; i++)
            {
                ConcatPaths[i]=paths.get(NumberOfElementConcatPaths);
                NumberOfElementConcatPaths+=paths.size()/CountComputers+1;              
                for (int j=1; j<paths.size()/CountComputers+1; j++)
                {
                    ConcatPaths[i]+="\n"+paths.get(i*(paths.size()/CountComputers+1)+j);
                }           
            }
            for (int k=paths.size()%CountComputers; k<CountComputers; k++)
            {
                ConcatPaths[k]=paths.get(NumberOfElementConcatPaths);
                NumberOfElementConcatPaths+=paths.size()/CountComputers;                
                for (int j=1; j<paths.size()/CountComputers; j++)
                {
                    ConcatPaths[k]+="\n"+paths.get((k-paths.size()%CountComputers)*paths.size()/CountComputers+paths.size()%CountComputers*(paths.size()/CountComputers+1)+j);
                }                   
            }
        }
        //CountComputers=ConfigFile.readInt();
        for (int i=0; i<ConcatPaths.length; i++)
        {
            word.set(ConcatPaths[i]);
            output.collect(word, zero);
        }
     }
}

public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, LongWritable> { 
    public native long Traveser(String Path);
    public native void Configure(String Path);
    public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException { 
        long count = 0; // initialized, so the compiler cannot complain if ProcessedPaths turns out empty
        String line = key.toString();
        ArrayList<String> ProcessedPaths = new ArrayList<String>();
        StringTokenizer tokenizer = new StringTokenizer(line, "\n");
        while (tokenizer.hasMoreTokens()) 
        {
          ProcessedPaths.add(tokenizer.nextToken());
        }       
        Configure("/etc/nsindexer.conf");
        for (int i=0; i<ProcessedPaths.size(); i++)
        {
            count=Traveser(ProcessedPaths.get(i));
        }
        output.collect(key, new LongWritable(count));
      }
    static
    {
        // load the shared library that implements Traveser and Configure
        System.loadLibrary("nativelib");
    } 
}

public static void main(String[] args) throws Exception { 
      JobConf conf = new JobConf(ParallelIndexation.class); 
      conf.setJobName("parallelindexation");
      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(LongWritable.class);
      // the map phase emits IntWritable values, so declare that separately
      conf.setMapOutputValueClass(IntWritable.class);
      conf.setMapperClass(Map.class);
      // Reduce emits LongWritable rather than IntWritable values, so it
      // cannot double as a combiner; no combiner is set
      conf.setReducerClass(Reduce.class);
      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);
      FileInputFormat.setInputPaths(conf, new Path(args[0]));
      FileOutputFormat.setOutputPath(conf, new Path(args[1]));
      JobClient.runJob(conf);
  } 
}

I compiled the program with the command

javac -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -d folder/classes folder/src/ParallelIndexation.java

and then, for the native methods, tried to create the .h file:

root@one:/export/hadoop-1.0.1/folder/classes/# javah -jni org.myorg.ParallelIndexation

and got the following error:

Error: cannot access org.apache.hadoop.mapred.MapReduceBase
  class file for org.apache.hadoop.mapred.MapReduceBase not found

The directory /export/hadoop-1.0.1/folder/classes/org/myorg contains three files: ParallelIndexation$Map.class, ParallelIndexation$Reduce.class, and ParallelIndexation.class.

Answer by bqf10yzr:

javah cannot find org.apache.hadoop.mapred.MapReduceBase. I guess that class lives in /export/hadoop-1.0.1/hadoop-core-1.0.1.jar, so you have to add that jar to javah's classpath:

javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -jni org.myorg.ParallelIndexation
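
Two caveats worth adding to this answer (my own reading of the setup above, not something the answer states). First, giving javah an explicit -classpath replaces its default classpath of ".", so when running from /export/hadoop-1.0.1/folder/classes the current directory has to be listed as well, or javah will next fail to find org.myorg.ParallelIndexation itself. Second, the native methods Traveser and Configure are declared in the nested Reduce class, so javah must be pointed at that nested class (quoting the $ from the shell) to emit their prototypes:

javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar:. -jni 'org.myorg.ParallelIndexation$Reduce'

If that is right, the generated header should declare roughly the following (the $ in the class name is mangled to _00024 under the JNI naming rules); these functions then have to be implemented in the shared library loaded as "nativelib", i.e. libnativelib.so on Linux:

#include <jni.h>

/* long Traveser(String Path) in org.myorg.ParallelIndexation.Reduce */
JNIEXPORT jlong JNICALL Java_org_myorg_ParallelIndexation_00024Reduce_Traveser
  (JNIEnv *, jobject, jstring);

/* void Configure(String Path) in org.myorg.ParallelIndexation.Reduce */
JNIEXPORT void JNICALL Java_org_myorg_ParallelIndexation_00024Reduce_Configure
  (JNIEnv *, jobject, jstring);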
