org.apache.spark.sql.AnalysisException when calling saveAsTable

Date: 2017-06-30 13:33:42

Tags: hive spark-dataframe hortonworks-data-platform

How do I resolve this error?

The code below works in Zeppelin, but fails when it is compiled into an assembly jar and submitted with spark-submit.

The error is:

    org.apache.spark.sql.AnalysisException: Specifying database name or other
    qualifiers are not allowed for temporary tables. If the table name has
    dots (.) in it, please quote the table name with backticks (`);

Code:

    import org.apache.spark._
    import org.apache.spark.rdd.NewHadoopRDD
    import org.apache.spark.SparkContext
    import org.apache.spark.SparkContext._
    import org.apache.spark.SparkConf
    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.hive.HiveContext 
    import java.text.SimpleDateFormat
    import java.util.Calendar  

    case class Benchmark(date: String, time: String, start_end: String, 
                         server: String, timestamp: Long, interface: String, 
                         cid: String, raw: String)

    object job {

        def main(args: Array[String]) {

            val sdf = new java.text.SimpleDateFormat("yyyyMMdd")
            val sdf1 = new java.text.SimpleDateFormat("yyyy-MM-dd")
            val calendar = Calendar.getInstance()
            calendar.set(Calendar.DAY_OF_YEAR, 
                         calendar.get(Calendar.DAY_OF_YEAR) -1)
            val date = sdf.format(calendar.getTime())
            val dt = sdf1.format(calendar.getTime())

            val conf = new SparkConf().setAppName("Interface_HtoH_Job")
            val sc = new SparkContext(conf)
            val sqlContext = new SQLContext(sc)
            import sqlContext.implicits._
            val hiveContext = new HiveContext(sc)

            val benchmarkText = sc.textFile(s"hdfs:/rawlogs/prod/log/${date}/*.gz")

            val pattern = "([0-9-]{10}) ([0-9:]{8}),[0-9]{1,3} Benchmark..* - (Start|End)<ID=([0-9a-zA-Z_]+)-([0-9]+)><([0-9a-zA-Z.,:!@() =_-]*)><cid=TaskId_([0-9A-Z#_a-z]+),.*><[,0-9:a-zA-Z ]+>".r

            benchmarkText.filter { ln => ln.startsWith("2017-") }
                         .filter { l => l.endsWith(">") }
                         .filter { k => k.contains("<cid=TaskId") }
                         .map { line =>
                             try {
                                 // Extract the seven capture groups defined by the regex.
                                 val pattern(date, time, startEnd, server, ts, interface, cid) = line
                                 Benchmark(date, time, startEnd, server, ts.toLong, interface, cid, line)
                             } catch {
                                 // Lines that fail to match become marker records so the job keeps running.
                                 case e: Exception =>
                                     Benchmark(dt, "00:00:00", "bad", e.toString, 0L, "bad", "bad", line)
                             }
                         }.toDF()
                         .write
                         .mode("overwrite")
                         .saveAsTable("prod_ol_bm.interface_benchmark_tmp") // error here
    }
}

Run with spark-submit on:

HDP : 2.5.3.0-37
Spark : 1.6.2.2.5.3.0-37 built for Hadoop 2.7.3.2.5.3.0-37
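
The exact spark-submit command line is not given in the original post; a typical invocation for this setup (the assembly jar name and master setting below are illustrative assumptions) might look like:

    # Illustrative only: jar name and master are assumptions, --class job matches the object above.
    spark-submit \
        --class job \
        --master yarn-client \
        interface-htoh-assembly-1.0.jar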

1 Answer:

Answer 0 (score: 1):

Change the following line:

    val sqlContext = new SQLContext(sc)

to:

    val sqlContext = new HiveContext(sc)

Both the spark-shell and Zeppelin create a HiveContext that is named sqlContext, which is a little silly. You need a HiveContext to connect to Hive: with a plain SQLContext, saveAsTable can only create temporary tables, and a temporary table name may not carry a database qualifier such as prod_ol_bm, which is exactly what the exception reports.
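
A minimal sketch of the corrected program skeleton, assuming the same app and table names as in the question (Spark 1.6 API; the placeholder DataFrame stands in for the parsing pipeline shown above):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.hive.HiveContext

    object job {
        def main(args: Array[String]) {
            val conf = new SparkConf().setAppName("Interface_HtoH_Job")
            val sc = new SparkContext(conf)

            // HiveContext (not SQLContext) resolves qualified table names such as
            // prod_ol_bm.interface_benchmark_tmp against the Hive metastore.
            val sqlContext = new HiveContext(sc)
            import sqlContext.implicits._

            // Placeholder data; in the real job this is the parsed Benchmark DataFrame.
            val df = sc.parallelize(Seq(("2017-06-29", 0L))).toDF("date", "timestamp")

            df.write
              .mode("overwrite")
              .saveAsTable("prod_ol_bm.interface_benchmark_tmp") // no longer throws
        }
    }

In Spark 2.x the equivalent would be a SparkSession built with enableHiveSupport(), which replaced HiveContext.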
