toDF is not a member of Seq, toDS is not a member of Seq

Date: 2020-03-10 21:45:29

Tags: scala apache-spark

I am trying to convert a Seq to a DataFrame or Dataset with the code below. It seems straightforward, but I get a compile error when I do it, and I am not sure what I am doing wrong. I looked at similar questions for a solution and, as suggested there, moved the case class definitions outside of main, but I still hit the problem. Here is the code:

package sparkWCExample.spWCExample

import org.apache.log4j.Level
import org.apache.spark.sql.{Dataset, SparkSession, DataFrame, Row, Encoders}
import org.apache.spark.sql.functions._
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.sql.Row
import org.apache.spark.sql.Dataset

// Create the case classes for our domain
case class Department(id: String, name: String)
case class Person(name: String, age: Long)

object DatasetExample {
  def main(args: Array[String]) {
    println("Start now")
    val conf = new SparkConf().setAppName("Spark Scala WordCount Example").setMaster("local[1]")
    val spark = SparkSession.builder().config(conf).appName("CsvExample").master("local").getOrCreate()
    val sqlContext = new org.apache.spark.sql.SQLContext(spark.sparkContext)
    import sqlContext.implicits._
    import spark.implicits._

    //val df = spark.read.options(Map("inferSchema"->"true","delimiter"->",","header"->"true")).csv("C:\\Sankha\\Study\\data\\salary.csv")

    // Create the Departments
    val department1 = new Department("123456", "Computer Science")
    val department2 = new Department("789012", "Mechanical Engineering")
    val department3 = new Department("345678", "Theater and Drama")
    val department4 = new Department("901234", "Indoor Recreation")

    val caseClassDS = Seq(Person("Andy", 32)).toDS()
    val df = Seq(department1, department2, department3, department4).toDF
  }
}

I am using Spark 2.4.5 and Scala 2.12, and the code above was written in the Scala IDE. Here is the error:

toDF is not a member of Seq[sparkWCExample.spWCExample.Department]
toDS is not a member of Seq[sparkWCExample.spWCExample.Person]

1 Answer:

Answer 0 (score: 0):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

// Create the case classes for our domain
case class Department(id: String, name: String)
case class Person(name: String, age: Long)

object DatasetExample {

  def main(args: Array[String]): Unit = {
    println("Start now")
    val conf = new SparkConf().setAppName("Spark Scala WordCount Example").setMaster("local[1]")
    val spark = SparkSession.builder().config(conf).appName("CsvExample").master("local").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    import spark.implicits._

    //val df = spark.read.options(Map("inferSchema"->"true","delimiter"->",","header"->"true")).csv("C:\\Sankha\\Study\\data\\salary.csv")

    // Create the Departments
    val department1 = Department("123456", "Computer Science")
    val department2 = Department("789012", "Mechanical Engineering")
    val department3 = Department("345678", "Theater and Drama")
    val department4 = Department("901234", "Indoor Recreation")

    val caseClassDS: Dataset[Person] = Seq(Person("Andy", 32)).toDS()
    val df: DataFrame = Seq(department1, department2, department3, department4).toDF

  }
}
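
If you want to confirm that the conversions work, you could trigger a couple of actions at the end of main. This snippet is an illustrative addition on my part, not required for the fix:

    // Illustrative check (not part of the fix): inspect schema and contents.
    caseClassDS.printSchema() // name: string, age: long
    caseClassDS.show()        // one row: Andy, 32
    df.show()                 // four Department rows

    spark.stop()              // release local resources when done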

You were also using the deprecated Spark context initialization (SQLContext), and I cleaned up a lot of unused imports. The code itself is fine; the only problem was the Spark context setup.
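
If you would rather not depend on the implicit toDF/toDS conversions at all, SparkSession also exposes explicit factory methods. A minimal sketch, assuming it runs inside the same main where spark, the departments, and the case classes are in scope:

    import org.apache.spark.sql.Encoders

    // Explicit alternatives to the implicit conversions (a sketch, same data as above):
    val deptDF: DataFrame = spark.createDataFrame(Seq(department1, department2, department3, department4))
    val personDS: Dataset[Person] = spark.createDataset(Seq(Person("Andy", 32)))(Encoders.product[Person])

This avoids any ambiguity about which implicits are in scope, at the cost of slightly more verbose call sites.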

PS: I suggest you take a look at the Spark documentation for a better understanding.
