java.lang.NoClassDefFoundError: org/apache/commons/logging/LogFactory —— 在 Hadoop 1.2.1 的 K-means 算法中

gopyfrb3  于 2021-05-30  发布在  Hadoop
关注(0)|答案(1)|浏览(364)

我尝试按照 http://codingwiththomas.blogspot.kr/2011/05/k-means-clustering-with-mapreduce.html 上的示例来做，但是运行时报了如下错误：

log4j:WARN Error during default initialization
java.lang.NoClassDefFoundError: org/apache/log4j/AppenderSkeleton
    at java.lang.ClassLoader.findBootstrapClass(Native Method)
    at java.lang.ClassLoader.findBootstrapClassOrNull(ClassLoader.java:1070)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:414)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:412)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:190)
    at org.apache.log4j.helpers.Loader.loadClass(Loader.java:179)
    at org.apache.log4j.helpers.OptionConverter.instantiateByClassName(OptionConverter.java:320)
    at org.apache.log4j.helpers.OptionConverter.instantiateByKey(OptionConverter.java:121)
    at org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:664)
    at org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:647)
    at org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:544)
    at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:440)
    at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:476)
    at org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:471)
    at org.apache.log4j.LogManager.<clinit>(LogManager.java:125)
    at org.apache.log4j.Logger.getLogger(Logger.java:105)
    at org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:289)
    at org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:109)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.commons.logging.impl.LogFactoryImpl.createLogFromClass(LogFactoryImpl.java:1116)
    at org.apache.commons.logging.impl.LogFactoryImpl.discoverLogImplementation(LogFactoryImpl.java:914)
    at org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:604)
    at org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:336)
    at org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:310)
    at org.apache.commons.logging.LogFactory.getLog(LogFactory.java:685)
    at com.clustering.mapreduce.KMeansClusteringJob.<clinit>(KMeansClusteringJob.java:22)

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/commons/logging/LogFactory
    at org.apache.hadoop.conf.Configuration.<clinit>(Configuration.java:146)
    at com.clustering.mapreduce.KMeansClusteringJob.main(KMeansClusteringJob.java:28)

尽管我已经把 hadoop/lib 目录下的所有 jar 文件以及 hadoop-core-1.2.1.jar 都添加到了项目中。我的代码如下：

public class KMeansClusteringJob {

private static final Log LOG = LogFactory.getLog(KMeansClusteringJob.class);

/**
 * Entry point: runs an iterative K-Means clustering as a chain of
 * MapReduce jobs.
 * <p>
 * Flow: seed two initial cluster centers and a small set of input
 * vectors as SequenceFiles, run the first clustering job, then keep
 * re-running the job (reading each iteration's output as the next
 * iteration's input) until the CONVERGED counter reports that no
 * center moved. Finally the resulting center/vector pairs are logged.
 *
 * @param args unused command-line arguments
 * @throws IOException            on HDFS/local filesystem failures
 * @throws InterruptedException   if job submission/waiting is interrupted
 * @throws ClassNotFoundException if a job class cannot be resolved
 */
public static void main(String[] args) throws IOException,
        InterruptedException, ClassNotFoundException {

    int iteration = 1;
    Configuration conf = new Configuration();
    conf.set("num.iteration", iteration + "");

    Path in = new Path("files/clustering/import/data");
    Path center = new Path("files/clustering/import/center/cen.seq");
    conf.set("centroid.path", center.toString());
    Path out = new Path("files/clustering/depth_1");

    Job job = new Job(conf);
    job.setJobName("KMeans Clustering");

    job.setMapperClass(KMeansMapper.class);
    job.setReducerClass(KMeansReducer.class);
    job.setJarByClass(KMeansMapper.class);

    SequenceFileInputFormat.addInputPath(job, in);
    FileSystem fs = FileSystem.get(conf);

    // Start from a clean slate: remove leftovers from any previous run.
    if (fs.exists(out))
        fs.delete(out, true);

    // BUG FIX: the original code checked center/in but deleted "out" in
    // both branches, so stale center and input files were never removed
    // before the writers below re-created them.
    if (fs.exists(center))
        fs.delete(center, true);

    if (fs.exists(in))
        fs.delete(in, true);

    // Seed the two initial cluster centers: (1,1) and (5,5).
    final SequenceFile.Writer centerWriter = SequenceFile.createWriter(fs,
            conf, center, ClusterCenter.class, IntWritable.class);
    final IntWritable value = new IntWritable(0);
    centerWriter.append(new ClusterCenter(new Vector(1, 1)), value);
    centerWriter.append(new ClusterCenter(new Vector(5, 5)), value);
    centerWriter.close();

    // Write the input vectors; the key is a dummy center (0,0) that the
    // first map pass will replace with the nearest real center.
    final SequenceFile.Writer dataWriter = SequenceFile.createWriter(fs,
            conf, in, ClusterCenter.class, Vector.class);
    final ClusterCenter dummyKey = new ClusterCenter(new Vector(0, 0));
    dataWriter.append(dummyKey, new Vector(1, 2));
    dataWriter.append(dummyKey, new Vector(16, 3));
    dataWriter.append(dummyKey, new Vector(3, 3));
    dataWriter.append(dummyKey, new Vector(2, 2));
    dataWriter.append(dummyKey, new Vector(2, 3));
    dataWriter.append(dummyKey, new Vector(25, 1));
    dataWriter.append(dummyKey, new Vector(7, 6));
    dataWriter.append(dummyKey, new Vector(6, 5));
    dataWriter.append(dummyKey, new Vector(-1, -23));
    dataWriter.close();

    SequenceFileOutputFormat.setOutputPath(job, out);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setOutputKeyClass(ClusterCenter.class);
    job.setOutputValueClass(Vector.class);

    job.waitForCompletion(true);

    // CONVERGED counts centers that still moved this iteration;
    // iterate until it drops to zero.
    long counter = job.getCounters()
            .findCounter(KMeansReducer.Counter.CONVERGED).getValue();
    iteration++;
    while (counter > 0) {
        conf = new Configuration();
        conf.set("centroid.path", center.toString());
        conf.set("num.iteration", iteration + "");
        job = new Job(conf);
        job.setJobName("KMeans Clustering " + iteration);

        job.setMapperClass(KMeansMapper.class);
        job.setReducerClass(KMeansReducer.class);
        job.setJarByClass(KMeansMapper.class);

        // Each iteration reads the previous iteration's output.
        in = new Path("files/clustering/depth_" + (iteration - 1) + "/");
        out = new Path("files/clustering/depth_" + iteration);

        SequenceFileInputFormat.addInputPath(job, in);
        if (fs.exists(out))
            fs.delete(out, true);

        SequenceFileOutputFormat.setOutputPath(job, out);
        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        job.setOutputKeyClass(ClusterCenter.class);
        job.setOutputValueClass(Vector.class);

        job.waitForCompletion(true);
        iteration++;
        counter = job.getCounters()
                .findCounter(KMeansReducer.Counter.CONVERGED).getValue();
    }

    // Dump the final (converged) center/vector assignments.
    Path result = new Path("files/clustering/depth_" + (iteration - 1)
            + "/");

    FileStatus[] stati = fs.listStatus(result);
    for (FileStatus status : stati) {
        if (!status.isDir()) {
            Path path = status.getPath();
            LOG.info("FOUND " + path.toString());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs, path,
                    conf);
            try {
                ClusterCenter key = new ClusterCenter();
                Vector v = new Vector();
                while (reader.next(key, v)) {
                    LOG.info(key + " / " + v);
                }
            } finally {
                // Close the reader even if next() throws.
                reader.close();
            }
        }
    }
}

这个应用程序是否还需要添加其他的 jar 包？

s3fp2yjn

s3fp2yjn1#

这个错误说明类路径中缺少 Apache Commons Logging 的 jar 包（commons-logging-x.y.jar，Hadoop 的 lib 目录中自带）。请把该 jar 以及 log4j 的 jar 一并添加到运行时类路径中——注意栈跟踪中最先失败的其实是 org/apache/log4j/AppenderSkeleton，说明 log4j 的 jar 同样没有被正确加载。

相关问题