java.lang.VerifyError与Hadoop

bd1hkmkf  于 2021-06-02  发布在  Hadoop
关注(0)|答案(1)|浏览(566)

我在一个使用Hadoop的Java项目中工作,遇到了一个java.lang.VerifyError,不知道如何解决。我看到有人遇到过同样的问题,但要么没有答案,要么给出的解决方案在我的情况下不起作用。
我的类:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class GetStats {

    public static List<Statistique> stats; // class with one String an one int

    public static class TokenizerMapper extends
            Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            if (key.toString().contains("HEAD")
                    || key.toString().contains("POST")
                    || key.toString().contains("GET")
                    || key.toString().contains("OPTIONS")
                    || key.toString().contains("CONNECT"))
                GetStats.stats.add(new Statistique(key.toString().replace("\"", ""), sum));
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println("Start wc");
        stats = new ArrayList<>();

//      File file = new File("err.txt");
//      FileOutputStream fos = new FileOutputStream(file);
//      PrintStream ps = new PrintStream(fos);
//      System.setErr(ps);

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(GetStats.class);
        job.setMapperClass(TokenizerMapper.class);
//      job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path("input"));
        job.setOutputFormatClass(NullOutputFormat.class);

        job.waitForCompletion(true);

        System.out.println(stats);
        System.out.println("End");
    }
}

错误是:

Exception in thread "main" java.lang.VerifyError: Bad type on operand stack
Exception Details:
  Location:
    org/apache/hadoop/mapred/JobTrackerInstrumentation.create(Lorg/apache/hadoop/mapred/JobTracker;Lorg/apache/hadoop/mapred/JobConf;)Lorg/apache/hadoop/mapred/JobTrackerInstrumentation; @5: invokestatic
  Reason:
    Type 'org/apache/hadoop/metrics2/lib/DefaultMetricsSystem' (current frame, stack[2]) is not assignable to 'org/apache/hadoop/metrics2/MetricsSystem'
  Current Frame:
    bci: @5
    flags: { }
    locals: { 'org/apache/hadoop/mapred/JobTracker', 'org/apache/hadoop/mapred/JobConf' }
    stack: { 'org/apache/hadoop/mapred/JobTracker', 'org/apache/hadoop/mapred/JobConf', 'org/apache/hadoop/metrics2/lib/DefaultMetricsSystem' }
  Bytecode:
    0000000: 2a2b b200 03b8 0004 b0                

    at org.apache.hadoop.mapred.LocalJobRunner.<init>(LocalJobRunner.java:573)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:494)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:479)
    at org.apache.hadoop.mapreduce.Job$1.run(Job.java:563)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.mapreduce.Job.connect(Job.java:561)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:549)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:580)
    at hadoop.GetStats.main(GetStats.java:79)

你有什么思路吗?如果还需要其他信息,尽管问。

sd2nnvve

sd2nnvve1#

我解决了我的问题。
导入的jar本身没有问题,但是我之前尝试过的另一个版本(可能是旧版本)的jar也留在了项目文件夹中。调用该类时,似乎使用的是项目文件夹中那个较旧版本的jar,而且这个jar在类路径中排在我想要的jar之前。我把旧的jar从项目文件夹中删除后,程序就正常工作了。

相关问题