Failed to get broadcast_5_piece0 of broadcast_5

pxyaymoc  posted on 2021-05-27 in Spark

I am getting the error "Failed to get broadcast_5_piece0 of broadcast_5" when running a Spark application with 2 worker instances. I also tried setting spark.cleaner.ttl, but I still get the same error afterwards. Can anyone help?
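(For context, spark.cleaner.ttl is set on the SparkConf before the context is created. The sketch below shows the typical way it is set; the class name and the 3600-second value are illustrative assumptions, not taken from the original post.)

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class TtlConfigSketch {
    public static void main(String[] args) {
        // spark.cleaner.ttl (Spark 1.x) periodically forgets ALL metadata older
        // than this many seconds -- including broadcast blocks that running
        // tasks may still need, which can trigger "Failed to get broadcast_X_pieceY".
        SparkConf conf = new SparkConf()
                .setAppName("TtlConfigSketch")
                .set("spark.cleaner.ttl", "3600"); // assumed value, in seconds
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.stop();
    }
}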
The full stack trace is below:

java.io.IOException: org.apache.spark.SparkException: Failed to get broadcast_5_piece0 of broadcast_5
    at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1212)
    at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:165)
    at org.apache.spark.broadcast.TorrentBroadcast._value$lzycompute(TorrentBroadcast.scala:64)
    at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:64)
    at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:88)
    at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.SparkException: Failed to get broadcast_5_piece0 of broadcast_5
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1$$anonfun$2.apply(TorrentBroadcast.scala:138)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1$$anonfun$2.apply(TorrentBroadcast.scala:138)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply$mcVI$sp(TorrentBroadcast.scala:137)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply(TorrentBroadcast.scala:120)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply(TorrentBroadcast.scala:120)
    at scala.collection.immutable.List.foreach(List.scala:318)
    at org.apache.spark.broadcast.TorrentBroadcast.org$apache$spark$broadcast$TorrentBroadcast$$readBlocks(TorrentBroadcast.scala:120)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1.apply(TorrentBroadcast.scala:175)
    at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1205)
    ... 11 more

Adding the code:

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;

import com.datastax.spark.connector.japi.CassandraJavaUtil;
import com.datastax.spark.connector.japi.CassandraRow;
import com.datastax.spark.connector.japi.SparkContextJavaFunctions;
import com.google.common.base.Optional;

import scala.Tuple2;

// Parse_Object is a user-defined serializable value class; its definition is not shown in the post.
public class Insert_into_cassandra implements Serializable {

    // NOTE: mutating this static list inside closures only works in local mode;
    // on a cluster each executor mutates its own copy and the driver never sees it.
    static List<String> signal_name_pass = new ArrayList<String>();
    static int count = 4;
    // NOTE: setMaster("local") hard-codes local mode and overrides any cluster
    // master passed on the spark-submit command line.
    static SparkConf conf = new SparkConf()
            .setAppName("Insert_into_cassandra")
            .setMaster("local")
            .set("spark.cassandra.connection.host", "127.0.0.1");
    static JavaSparkContext jspc = new JavaSparkContext(conf);
    static SparkContextJavaFunctions functions = CassandraJavaUtil.javaFunctions(jspc);
    static Insert_into_cassandra iic = new Insert_into_cassandra();
    static int value_in_db = 0;

    public static void main(String[] gg) {

        JavaRDD<String> rbmfile = jspc.textFile("/home/amd/Desktop/prac");

        // Parse each line (fields separated by two spaces) into (signal name, Parse_Object).
        JavaPairRDD<String, Parse_Object> signal_name = rbmfile.mapToPair(
                new PairFunction<String, String, Parse_Object>() {
            public Tuple2<String, Parse_Object> call(String x) throws Exception {
                String[] fields = x.split("  ");
                return new Tuple2<String, Parse_Object>(fields[0],
                        new Parse_Object(fields[1], fields[2], fields[3]));
            }
        });

        JavaRDD<CassandraRow> signal_name_cassandra =
                functions.cassandraTable("tutorialspoint", "stuff_id_2");

        // Sort descending on the first column so the first row holds the max id.
        JavaRDD<CassandraRow> sort = signal_name_cassandra.sortBy(
                new Function<CassandraRow, String>() {
            public String call(CassandraRow x) throws Exception {
                return x.getString(0);
            }
        }, false, 1).coalesce(1);

        value_in_db = Integer.parseInt(sort.first().getString(0));

        // Re-key the Cassandra rows by signal name so they can be joined with the file data.
        JavaPairRDD<String, String> take_signal_name = signal_name_cassandra.mapToPair(
                new PairFunction<CassandraRow, String, String>() {
            public Tuple2<String, String> call(CassandraRow x) throws Exception {
                return new Tuple2<String, String>(x.getString(1), x.getString(0));
            }
        });

        /* Commented out in the original post:
        JavaPairRDD<String, String> getting_max_id = signal_name_cassandra.mapToPair(
                new PairFunction<CassandraRow, String, String>() {
            public Tuple2<String, String> call(CassandraRow x) throws Exception {
                return new Tuple2<String, String>(x.getString(0), x.getString(1));
            }
        });
        */

        // Right outer join keeps every record from the file, with an Optional id
        // from Cassandra (Guava's Optional, as used by the Spark 1.x Java API).
        JavaPairRDD<String, Tuple2<Optional<String>, Parse_Object>> join =
                take_signal_name.rightOuterJoin(signal_name);

        JavaPairRDD<String, String> getting_id = join.mapToPair(
                new PairFunction<Tuple2<String, Tuple2<Optional<String>, Parse_Object>>, String, String>() {
            public Tuple2<String, String> call(
                    Tuple2<String, Tuple2<Optional<String>, Parse_Object>> x)
                    throws Exception {
                if (x._2()._1().isPresent()) {
                    System.out.println("if----" + x._1());
                    return new Tuple2<String, String>(x._1(), x._2()._1().get());
                }
                // No id in Cassandra yet: remember the name and emit null,
                // which the filter below drops.
                signal_name_pass.add(x._1());
                return null;
            }
        }).filter(new Function<Tuple2<String, String>, Boolean>() {
            public Boolean call(Tuple2<String, String> x) throws Exception {
                return x != null;
            }
        });

        getting_id.saveAsTextFile("/home/amd/Desktop/smal/get13");
    }
}

rkue9o1l  1#

In modern versions of Spark, setting spark.cleaner.ttl is almost always a mistake: it can cause important cached data to be deleted prematurely. It is much safer to leave it unset and let the built-in ContextCleaner do its work.
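As a rough illustration of that advice (the class name and data below are hypothetical, not from the question): leave spark.cleaner.ttl unset, and if you really need to reclaim a broadcast's memory, release it explicitly once no running job still reads it.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.broadcast.Broadcast;

public class BroadcastCleanupSketch {
    public static void main(String[] args) {
        // No spark.cleaner.ttl: the built-in ContextCleaner removes broadcast
        // blocks on its own once the Broadcast object becomes unreachable.
        SparkConf conf = new SparkConf().setAppName("BroadcastCleanupSketch").setMaster("local");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        List<String> lookup = Arrays.asList("a", "b", "c"); // hypothetical lookup data
        final Broadcast<List<String>> bcast = jsc.broadcast(lookup);

        JavaRDD<String> matched = jsc.parallelize(Arrays.asList("a", "x", "c"))
                .filter(new Function<String, Boolean>() {
                    public Boolean call(String s) throws Exception {
                        return bcast.value().contains(s);
                    }
                });
        System.out.println(matched.collect()); // [a, c]

        // Only after every job that reads the broadcast has finished:
        bcast.unpersist(); // drops executor copies; Spark re-broadcasts if it is used again
        jsc.stop();
    }
}

The point is that cleanup is tied to the lifecycle of a specific broadcast variable rather than to a global wall-clock timeout.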
If removing the ttl does not fix the problem, you will need to provide more relevant details, such as:
Are there any suspicious logs on the executors?
The full stack trace of the exception?
A sample of the code you are running?
