Disk full when running Hadoop

vecaoik1 posted on 2021-05-30 in Hadoop

I ran a recursive map/reduce program. Something went wrong and it consumed almost all of the free disk space on my C: drive, so I shut down the ResourceManager, NodeManager, NameNode and DataNode consoles. Now my C: drive is almost full and I don't know how to reclaim the space and get the drive back to the state it was in before. What should I do now? Any help is appreciated. Here is the code:
import java.io.IOException;
import java.util.Date;
import java.util.StringTokenizer;
import java.util.Vector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class apriori {

public static class CandidateGenMap extends Mapper<LongWritable, Text, Text, IntWritable>
{
    private Text word = new Text();
    // Emit counts as IntWritable (the original used Text, which does not
    // match the IntWritable values the combiner/reducer expects).
    private IntWritable count = new IntWritable(1);
    private int Support = 5; // minimum support threshold (unused here)

    // Recursively emits every combination of two or more items from "in"
    // (roughly 2^n per input line) straight into the map output.
    public void CandidatesGenRecursion(Vector<String> in, Vector<String> out,
                                        int length, int level, int start,
                                        Context context) throws IOException {

        int i, size;

        for (i = start; i < length; i++) {
            if (level == 0) {
                // Level 0 only seeds the combination; singletons are not emitted.
                out.add(in.get(i));
            } else {
                out.add(in.get(i));

                // Join the current combination into a space-separated key.
                StringBuilder current = new StringBuilder();
                for (String s : out) {
                    if (current.length() > 0) {
                        current.append(" ");
                    }
                    current.append(s);
                }

                word.set(current.toString());
                count.set(1);
                try {
                    context.write(word, count);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            // Recurse to extend the current combination with later items.
            if (i < length - 1) {
                CandidatesGenRecursion(in, out, length, level + 1, i + 1, context);
            }
            // Backtrack: drop the last item before the next iteration.
            size = out.size();
            if (size > 0) {
                out.remove(size - 1);
            }
        }

    }

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException
    {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        String[] token = new String[2];
        int i = 0;
        // Bound the index so lines with more than two whitespace-separated
        // fields do not overflow the two-element array.
        while (tokenizer.hasMoreTokens() && i < 2) {
            token[i] = tokenizer.nextToken();
            ++i;
        }

        // The second field is a comma-separated list of items.
        StringTokenizer urlToken = new StringTokenizer(token[1], ",");

        Vector<String> lst = new Vector<String>();
        int loop = 0;
        while (urlToken.hasMoreTokens()) {
            lst.add(urlToken.nextToken());
            loop++;
        }

        Vector<String> combinations = new Vector<String>();

        if (!lst.isEmpty()) {
            CandidatesGenRecursion(lst, combinations, loop, 0, 0, context);
        }

    }
}

public static class CandidateGenReduce extends Reducer<Text, IntWritable, Text, IntWritable>
{
    // Note the Iterable parameter: the original declared Iterator, which does
    // not override Reducer.reduce(), so Hadoop silently runs the identity
    // reducer and nothing gets summed.
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException
    {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        try {
            context.write(key, new IntWritable(sum));
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}

public static void main(String[] args) throws Exception
{

    Date dt;
    long start,end; // Start and end time

    //Start Timer
    dt = new Date();
    start = dt.getTime();

    Configuration conf1 = new Configuration();
    System.out.println("Starting Job2");
    Job job2 = Job.getInstance(conf1, "apriori candidate gen"); // new Job(...) is deprecated
    job2.setJarByClass(apriori.class);

    job2.setMapperClass(CandidateGenMap.class);
    job2.setCombinerClass(CandidateGenReduce.class);
    job2.setReducerClass(CandidateGenReduce.class);
    job2.setMapOutputKeyClass(Text.class);
    // Must be IntWritable to match what the mapper emits and what the
    // combiner expects (the original declared Text here).
    job2.setMapOutputValueClass(IntWritable.class);
    job2.setOutputKeyClass(Text.class);
    job2.setOutputValueClass(IntWritable.class);

    job2.setInputFormatClass(TextInputFormat.class);
    job2.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job2, new Path(args[0]));
    FileOutputFormat.setOutputPath(job2, new Path(args[1]));
    job2.waitForCompletion(true);
    //End Timer
    dt = new Date();
    end = dt.getTime();
    System.out.println("Job2 took " + (end - start) + " ms");

}

}
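
For context on why the disk filled: assuming the recursion above is read correctly, CandidatesGenRecursion emits every combination of two or more items from each input line, i.e. 2^n - n - 1 candidates for a line with n items, all spilled to local map output files. A quick standalone sketch (plain Java, no Hadoop) of how fast that grows:

public class CandidateCount {
    public static void main(String[] args) {
        for (int n : new int[]{10, 20, 30}) {
            // All subsets of n items, minus the empty set and the n singletons.
            long candidates = (1L << n) - n - 1;
            System.out.println(n + " items -> " + candidates + " candidates");
        }
        // 30 items on one input line -> over a billion map output records.
    }
}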

dz6r00yl #1

Check the HDFS job output path and delete its contents.
List the contents:

$ sudo -u hdfs hadoop fs -ls [YourJobOutputPath]

Check disk usage:

$ sudo -u hdfs hadoop fs -du -h [YourJobOutputPath]

Delete the contents (careful, it is recursive!):

$ sudo -u hdfs hadoop fs -rm -R [YourJobOutputPath]
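
If you prefer to do the same from Java, the org.apache.hadoop.fs.FileSystem API covers all three steps; a minimal sketch (the output path is taken from the command line, adjust as needed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class InspectAndClean {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path outputPath = new Path(args[0]); // your job output path

        // Equivalent of: hadoop fs -ls [YourJobOutputPath]
        for (FileStatus st : fs.listStatus(outputPath)) {
            System.out.println(st.getPath() + "  " + st.getLen() + " bytes");
        }

        // Equivalent of: hadoop fs -du [YourJobOutputPath]
        ContentSummary cs = fs.getContentSummary(outputPath);
        System.out.println("Total: " + cs.getLength() + " bytes");

        // Equivalent of: hadoop fs -rm -R [YourJobOutputPath] (recursive!)
        fs.delete(outputPath, true);
    }
}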

6g8kf2rb #2

Hadoop needs enough disk space for its I/O operations at each stage (map, reduce, and so on).
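
The intermediate data from those stages lands on the local disk (under hadoop.tmp.dir by default), not in HDFS, which is why the C: drive filled up even after the job died. A minimal sketch of checking the free space on that drive before submitting a job; the C:/ path matches this question's setup, adjust to wherever your scratch space lives:

import java.io.File;

public class DiskCheck {
    public static void main(String[] args) {
        // Drive that holds Hadoop's local scratch space (hadoop.tmp.dir).
        File drive = new File("C:/");
        long freeGb = drive.getUsableSpace() / (1024L * 1024 * 1024);
        System.out.println("Usable space on " + drive + ": " + freeGb + " GB");
        if (freeGb < 10) { // illustrative threshold, tune to your data
            System.err.println("Too little local disk for map spills; aborting.");
            System.exit(1);
        }
    }
}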

gorkyyrv #3

Deleting the output directory may help free the disk space taken up by the files the MapReduce job created.
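
In the question's main() that can be automated, so a rerun never collides with (or keeps accumulating) old output; a sketch using the same FileSystem API as in answer #1, placed just before setOutputPath:

    // (requires: import org.apache.hadoop.fs.FileSystem;)
    Path outputPath = new Path(args[1]);
    FileSystem fs = FileSystem.get(conf1);
    if (fs.exists(outputPath)) {
        fs.delete(outputPath, true); // recursively remove the previous run's output
    }
    FileOutputFormat.setOutputPath(job2, outputPath);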
