
Spark job submitted to YARN fails with an error

1. The error message

java.sql.SQLException: No suitable driver
	at java.sql.DriverManager.getDriver(DriverManager.java:315)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
	at scala.Option.getOrElse(Option.scala:121)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.<init>(JDBCOptions.scala:83)
	at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.<init>(JDBCOptions.scala:34)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider.createRelation(JdbcRelationProvider.scala:32)
	at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:306)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:178)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:146)
	at com.dataexa.cp.base.datasource.DataBaseToDF.convert(DataBaseToDF.scala:22)
	at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:63)
	at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:56)
	at scala.collection.Iterator$class.foreach(Iterator.scala:893)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
	at scala.collection.MapLike$DefaultKeySet.foreach(MapLike.scala:174)
	at com.dataexa.cp.base.datasource.DataSourceReader.getResult(DataSourceReader.scala:56)
	at com.dataexa.cp.base.datasource.DataSourceReader$.main(DataSourceReader.scala:125)
	at com.dataexa.cp.base.datasource.DataSourceReader.main(DataSourceReader.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
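The top two frames show exactly where this fails: no "driver" option was supplied, so Spark's JDBCOptions falls back to java.sql.DriverManager.getDriver(url), and DriverManager finds no registered driver that accepts the URL. A minimal sketch that reproduces the same lookup outside Spark (the URL is a placeholder):

import java.sql.DriverManager

object DriverLookup {
  def main(args: Array[String]): Unit = {
    val url = "jdbc:mysql://dbhost:3306/test" // placeholder
    // Throws java.sql.SQLException: No suitable driver when no loaded
    // driver on this JVM's classpath accepts jdbc:mysql: URLs.
    val driver = DriverManager.getDriver(url)
    println(driver.getClass.getName)
  }
}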
           

2. The part of the code that throws the error

import org.apache.spark.sql.{DataFrame, SparkSession}

case class DataBaseToDF(sparkSession: SparkSession) {

  def convert(dataBase: DataBase): DataFrame = {

    // Read a table over JDBC. No "driver" option is set here, so Spark
    // has to ask java.sql.DriverManager for a driver matching the URL,
    // which is the lookup that throws in the stack trace above.
    val dataFrame = sparkSession.read.format(dataBase.getDbType)
      .options(Map(
        "url"         -> dataBase.getUrl,
        "inferschema" -> "true", // no-op for the JDBC source; the schema comes from the table
        "dbtable"     -> dataBase.getTableName,
        "user"        -> dataBase.getUsername,
        "password"    -> dataBase.getPassword))
      .load()

    dataFrame
  }
}

The error was thrown while Spark was reading a MySQL database.
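Why the "driver" option matters: when it is set, Spark loads the named class itself, and a JDBC driver registers itself with DriverManager in its static initializer; when it is absent, Spark can only call DriverManager.getDriver(url) (JDBCOptions.scala:84 in the trace), which sees nothing if the connector class was never loaded. The registration side effect, as a one-line sketch (the class name assumes Connector/J 5.x):

// Loading the class runs its static initializer, which calls
// DriverManager.registerDriver(...); after this, getDriver(url) can
// resolve jdbc:mysql: URLs.
Class.forName("com.mysql.jdbc.Driver")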

Adding the driver option to the code solved the problem. The colleague who wrote it said it wasn't needed, which turned out to be a real trap!

case class DataBaseToDF(sparkSession: SparkSession) {

  def convert(dataBase: DataBase): DataFrame = {

    val dataFrame = sparkSession.read.format(dataBase.getDbType)
      .options(Map(
        "url"         -> dataBase.getUrl,
        "inferschema" -> "true",
        // Naming the driver class explicitly makes Spark register it
        // with DriverManager instead of relying on URL-based lookup.
        "driver"      -> "com.mysql.jdbc.Driver",
        "dbtable"     -> dataBase.getTableName,
        "user"        -> dataBase.getUsername,
        "password"    -> dataBase.getPassword))
      .load()

    dataFrame
  }
}

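A few notes on the fix. com.mysql.jdbc.Driver is the Connector/J 5.x class name; Connector/J 8.x renamed it to com.mysql.cj.jdbc.Driver. The connector jar also still has to reach the cluster, for example via spark-submit --jars. And since format(dataBase.getDbType) suggests this reader handles more than one database type, hardcoding the MySQL class is fragile; a hypothetical helper (an illustration, not part of the original code) could derive the class from the URL instead:

// Hypothetical helper: pick the driver class from the JDBC URL prefix,
// so the fix is not hardwired to MySQL.
def driverFor(url: String): String =
  if (url.startsWith("jdbc:mysql:")) "com.mysql.jdbc.Driver" // 8.x: com.mysql.cj.jdbc.Driver
  else if (url.startsWith("jdbc:postgresql:")) "org.postgresql.Driver"
  else if (url.startsWith("jdbc:oracle:")) "oracle.jdbc.OracleDriver"
  else throw new IllegalArgumentException(s"no driver mapping for $url")

The options map would then use "driver" -> driverFor(dataBase.getUrl) instead of the literal class name.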