org.apache.spark.sql.AnalysisException when calling saveAsTable

jv4diomz · published 2021-06-26 in Hive

How can I resolve this error?
The code below works in Zeppelin, but fails when it is compiled into an assembly jar and submitted with spark-submit.
The error is:
org.apache.spark.sql.AnalysisException: Specifying database name or other qualifiers are not allowed for temporary tables. If the table name has dots (.) in it, please quote the table name with backticks (`).;
The code:

import org.apache.spark._
import org.apache.spark.rdd.NewHadoopRDD
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
import java.text.SimpleDateFormat
import java.util.Calendar

case class Benchmark(date: String, time: String, start_end: String,
                     server: String, timestamp: Long, interface: String,
                     cid: String, raw: String)

object job {

    def main(args: Array[String]) {

        val sdf = new SimpleDateFormat("yyyyMMdd")
        val sdf1 = new SimpleDateFormat("yyyy-MM-dd")
        val calendar = Calendar.getInstance()
        calendar.set(Calendar.DAY_OF_YEAR,
                     calendar.get(Calendar.DAY_OF_YEAR) - 1)
        val date = sdf.format(calendar.getTime())
        val dt = sdf1.format(calendar.getTime())

        val conf = new SparkConf().setAppName("Interface_HtoH_Job")
        val sc = new SparkContext(conf)
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._
        val hiveContext = new HiveContext(sc)

        val benchmarkText = sc.textFile(s"hdfs:/rawlogs/prod/log/${date}/*.gz")

        val pattern = "([0-9-]{10}) ([0-9:]{8}),[0-9]{1,3} Benchmark..* - (Start|End)<ID=([0-9a-zA-Z_]+)-([0-9]+)><([0-9a-zA-Z.,:!@() =_-]*)><cid=TaskId_([0-9A-Z#_a-z]+),.*><[,0-9:a-zA-Z ]+>".r

        benchmarkText.filter { ln => ln.startsWith("2017-") }
                     .filter { l => l.endsWith(">") }
                     .filter { k => k.contains("<cid=TaskId") }
                     .map { line =>
                         try {
                             val pattern(date, time, startEnd, server, ts, interface, cid) = line
                             Benchmark(date, time, startEnd, server, ts.toLong, interface, cid, line)
                         } catch {
                             case e: Exception =>
                                 Benchmark(dt, "00:00:00", "bad", e.toString, 0L, "bad", "bad", line)
                         }
                     }.toDF()
            .write
            .mode("overwrite")
            .saveAsTable("prod_ol_bm.interface_benchmark_tmp") // error here
    }
}

Submitted with spark-submit on:

HDP : 2.5.3.0-37
Spark : 1.6.2.2.5.3.0-37 built for Hadoop 2.7.3.2.5.3.0-37

vxbzzdmp1#

Change this line

val sqlContext = new SQLContext(sc)

to

val sqlContext = new HiveContext(sc)

Both the spark-shell and Zeppelin create a HiveContext named sqlContext, which is a bit silly. You need a HiveContext to be able to talk to Hive.
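For reference, here is a minimal sketch of the corrected setup. It is illustrative only: the object name FixSketch and the sample rows are made up, and it assumes Spark 1.6 with Hive support on the classpath and that the Hive database prod_ol_bm already exists. The point is that with a plain SQLContext, saveAsTable can only create temporary tables, and temporary tables cannot carry a database qualifier, which is exactly what the AnalysisException above complains about. A HiveContext resolves the write against the Hive metastore, so the qualified name is legal:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

object FixSketch {
    def main(args: Array[String]) {
        val conf = new SparkConf().setAppName("Interface_HtoH_Job")
        val sc = new SparkContext(conf)

        // A HiveContext (not a plain SQLContext), so that toDF() and
        // saveAsTable() resolve against the Hive metastore rather than
        // the in-memory temporary-table catalog.
        val sqlContext = new HiveContext(sc)
        import sqlContext.implicits._

        // Made-up sample data, just to have something to write.
        val df = sc.parallelize(Seq(("2017-01-01", "ok"))).toDF("date", "status")

        // With a HiveContext the db-qualified table name is accepted.
        df.write
          .mode("overwrite")
          .saveAsTable("prod_ol_bm.interface_benchmark_tmp")
    }
}

The same applies to the job in the question: because import sqlContext.implicits._ is taken from the SQLContext, the DataFrame produced by toDF() belongs to that context, and saveAsTable("prod_ol_bm.interface_benchmark_tmp") falls into the temporary-table code path and fails.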
