ClassNotFoundException spark-submit scala

Hi, I am trying to generate the output of the Salt examples, but without using the Docker setup mentioned in their documentation. I found the Scala code that helps generate the output, namely Main.scala. I modified Main.scala into a more convenient form,

package BinExTest
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.Row

import software.uncharted.salt.core.projection.numeric._
import software.uncharted.salt.core.generation.request._
import software.uncharted.salt.core.generation.Series
import software.uncharted.salt.core.generation.TileGenerator
import software.uncharted.salt.core.generation.output.SeriesData
import software.uncharted.salt.core.analytic.numeric._

import java.io._

import scala.util.parsing.json.JSONObject

object Main {

  // Defines the tile size in both x and y bin dimensions
  val tileSize = 256

  // Defines the output layer name
  val layerName = "pickups"

  // Creates and returns an Array of Double values encoded as 64bit Integers
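  // (each bin value is written least-significant byte first, i.e. little-endian)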
  def createByteBuffer(tile: SeriesData[(Int, Int, Int), (Int, Int), Double, (Double, Double)]): Array[Byte] = {
    val byteArray = new Array[Byte](tileSize * tileSize * 8)
    var j = 0
    tile.bins.foreach(b => {
      val data = java.lang.Double.doubleToLongBits(b)
      for (i <- 0 to 7) {
        byteArray(j) = ((data >> (i * 8)) & 0xff).asInstanceOf[Byte]
        j += 1
      }
    })
    byteArray
  }

  def main(args: Array[String]): Unit = {

    val jarFile = "/home/kesava/Studies/BinExTest/BinExTest.jar"; 
    val inputPath = "/home/kesava/Downloads/taxi_micro.csv"
    val outputPath = "/home/kesava/SoftWares/salt/salt-examples/bin-example/Output"

    val conf = new SparkConf().setAppName("salt-bin-example").setJars(Array(jarFile))
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    sqlContext.read.format("com.databricks.spark.csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .load(s"file://$inputPath")
      .registerTempTable("taxi_micro")

    // Construct an RDD of Rows containing only the fields we need. Cache the result
    val input = sqlContext.sql("select pickup_lon, pickup_lat from taxi_micro")
      .rdd.cache()

    // Given an input row, return pickup longitude, latitude as a tuple
    val pickupExtractor = (r: Row) => {
      if (r.isNullAt(0) || r.isNullAt(1)) {
        None
      } else {
        Some((r.getDouble(0), r.getDouble(1)))
      }
    }

    // Tile Generator object, which houses the generation logic
    val gen = TileGenerator(sc)

    // Break levels into batches. Process several higher levels at once because the
    // number of tile outputs is quite low. Lower levels done individually due to high tile counts.
    val levelBatches = List(List(0, 1, 2, 3, 4, 5, 6, 7, 8), List(9, 10, 11), List(12), List(13), List(14))

    // Iterate over sets of levels to generate.
    val levelMeta = levelBatches.map(level => {

      println("------------------------------")
      println(s"Generating level $level")
      println("------------------------------")

      // Construct the definition of the tiling jobs: pickups
      val pickups = new Series((tileSize - 1, tileSize - 1),
        pickupExtractor,
        new MercatorProjection(level),
        (r: Row) => Some(1),
        CountAggregator,
        Some(MinMaxAggregator))

      // Create a request for all tiles on these levels, generate
      val request = new TileLevelRequest(level, (coord: (Int, Int, Int)) => coord._1)
      val rdd = gen.generate(input, pickups, request)

      // Translate RDD of Tiles to RDD of (coordinate,byte array), collect to master for serialization
      val output = rdd
        .map(s => pickups(s).get)
        .map(tile => {
          // Return tuples of tile coordinate, byte array
          (tile.coords, createByteBuffer(tile))
        })
        .collect()

      // Save byte files to local filesystem
      output.foreach(tile => {
        val coord = tile._1
        val byteArray = tile._2
        val limit = (1 << coord._1) - 1
        // Use standard TMS path structure and file naming
        val file = new File(s"$outputPath/$layerName/${coord._1}/${coord._2}/${limit - coord._3}.bins")
        file.getParentFile.mkdirs()
        val output = new FileOutputStream(file)
        output.write(byteArray)
        output.close()
      })

      // Create map from each level to min / max values.
      rdd
        .map(s => pickups(s).get)
        .map(t => (t.coords._1.toString, t.tileMeta.get))
        .reduceByKey((l, r) => {
          (Math.min(l._1, r._1), Math.max(l._2, r._2))
        })
        .mapValues(minMax => {
          JSONObject(Map(
            "min" -> minMax._1,
            "max" -> minMax._2
          ))
        })
        .collect()
        .toMap
    })

    // Flatten array of maps into a single map
    val levelInfoJSON = JSONObject(levelMeta.reduce(_ ++ _)).toString()
    // Save level metadata to filesystem
    val pw = new PrintWriter(s"$outputPath/$layerName/meta.json")
    pw.write(levelInfoJSON)
    pw.close()

  }
}

I created a separate folder for this Scala file, with another folder named lib inside it that holds the required jars, and I compiled it with scalac as follows,

scalac -cp "lib/salt.jar:lib/spark.jar" Main.scala

This ran successfully and generated the classes under the BinExTest folder.
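(For reference, a plain jar of those classes, such as the BinExTest.jar used later on, can be produced with the standard jar tool; a minimal sketch, assuming the compiled class files sit under a BinExTest/ directory:)

jar cf BinExTest.jar BinExTest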

Now, the project's build.gradle has the following lines, from which I figured out the command that helps generate the output dataset,

task run(overwrite: true, type: Exec, dependsOn: [assemble]) {
  executable = 'spark-submit'
  args = ["--class","software.uncharted.salt.examples.bin.Main","/opt/salt/build/libs/salt-bin-example-${version}.jar", "/opt/data/taxi_one_day.csv", "/opt/output"]
}

Seeing this, I ran the following command,

spark-submit --class BinExTest.Main lib/salt.jar

When I do this, I get the following error,

java.lang.ClassNotFoundException: Main.BinExTest
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:278)
    at org.apache.spark.util.Utils$.classForName(Utils.scala:174)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:689)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

Can somebody help me out with this? I am completely new to this and got this far just by exploring.


[Update 1]

Taking YoYo's suggestion,

spark-submit --class BinExTest.Main --jars "BinExTest.jar" "lib/salt.jar"

I got past the ClassNotFoundException, but it produced a new error, shown below,

Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 3.0 failed 1 times, most recent failure: Lost task 1.0 in stage 3.0 (TID 6, localhost): java.lang.NoSuchMethodError: scala.runtime.IntRef.create(I)Lscala/runtime/IntRef;
    at BinExTest.Main$.createByteBuffer(Main.scala:29)
    at BinExTest.Main$$anonfun$2$$anonfun$6.apply(Main.scala:101)
    at BinExTest.Main$$anonfun$2$$anonfun$6.apply(Main.scala:99)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)

Any idea what is going on?


[Update 2]

Building Spark from source with Scala 2.11 support solved my previous problem. However, I ran into a new error, which is,

16/05/10 18:39:15 ERROR TaskSetManager: Task 0 in stage 2.0 failed 1 times; aborting job
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 2.0 failed 1 times, most recent failure: Lost task 0.0 in stage 2.0 (TID 3, localhost): java.lang.NoClassDefFoundError: scala/collection/GenTraversableOnce$class
    at software.uncharted.salt.core.util.SparseArray.<init>(SparseArray.scala:37)
    at software.uncharted.salt.core.util.SparseArray.<init>(SparseArray.scala:57)
    at software.uncharted.salt.core.generation.rdd.RDDSeriesWrapper.makeBins(RDDTileGenerator.scala:224)
    at software.uncharted.salt.core.generation.rdd.RDDTileGeneratorCombiner.createCombiner(RDDTileGenerator.scala:128)
    at software.uncharted.salt.core.generation.rdd.RDDTileGenerator$$anonfun$3.apply(RDDTileGenerator.scala:100)
    at software.uncharted.salt.core.generation.rdd.RDDTileGenerator$$anonfun$3.apply(RDDTileGenerator.scala:100)
    at org.apache.spark.util.collection.ExternalSorter$$anonfun$5.apply(ExternalSorter.scala:187)
    at org.apache.spark.util.collection.ExternalSorter$$anonfun$5.apply(ExternalSorter.scala:186)
    at org.apache.spark.util.collection.AppendOnlyMap.changeValue(AppendOnlyMap.scala:148)
    at org.apache.spark.util.collection.SizeTrackingAppendOnlyMap.changeValue(SizeTrackingAppendOnlyMap.scala:32)
    at org.apache.spark.util.collection.ExternalSorter.insertAll(ExternalSorter.scala:192)
    at org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:64)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ClassNotFoundException: scala.collection.GenTraversableOnce$class
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)

Is this because Scala 2.11 does not have the mentioned class?


[Final Update]

Adding Scala 2.10 to the spark-submit command did the trick.

spark-submit --class "BinExTest.Main" --jars "BinExTest.jar,lib/scala210.jar" "lib/salt.jar"

1 Answer

To run a Spark job, your code needs to be replicated to the different nodes that make up the Spark cluster. This is done by copying the jar file over to the other nodes.

That means you need to make sure your class files are packaged into a .jar file. In my typical solutions I build an uber jar that bundles the class files and the dependent jar files into a single .jar, using the Maven Shade plugin. That does not have to be your solution, but you should at least build a .jar file out of your generated classes.

To provide additional jar files manually, you need to add them with the --jars option, which takes a comma-separated list.
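For example, a rough sketch using the jar names from your question (assuming BinExTest.jar is a jar of your compiled BinExTest classes): pass your own jar as the application jar and the remaining dependencies as the comma-separated --jars list,

spark-submit --class BinExTest.Main --jars lib/salt.jar,lib/scala210.jar BinExTest.jar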

Update 1

Actually, even for me there is a lot of confusion around all the available options, in particular around jar files and how they get distributed, or how to modify the classpath in Spark. See another topic I just posted.

Update 2

The second part of your question has already been answered on another thread.
