Cannot run Spark examples locally on a MacBook: "Lost task 1.0 in stage 0.0"

Date: 2014-11-19 09:55:52

Tags: apache-spark

I installed Spark and ran `run-example SparkPi 10`; it fails with the error below. `spark-submit examples/src/main/python/pi.py 10` fails with a similar error.
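
For context, here is roughly what SparkPi does (my paraphrase of the example bundled with Spark 1.1.0, not a verbatim copy); the `reduce` at the end is the one the log later reports as failing at `SparkPi.scala:35`:

```scala
import org.apache.spark.{SparkConf, SparkContext}
import scala.math.random

// Sketch of the SparkPi example: estimate pi by Monte Carlo sampling,
// ending in the reduce that the log reports as "Failed to run reduce
// at SparkPi.scala:35".
object SparkPiSketch {
  def main(args: Array[String]): Unit = {
    val spark = new SparkContext(new SparkConf().setAppName("Spark Pi"))
    val slices = if (args.length > 0) args(0).toInt else 2 // "10" in my failing run
    val n = 100000 * slices
    val count = spark.parallelize(1 to n, slices).map { _ =>
      // Sample a point in the unit square; count it if it falls inside
      // the unit circle.
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y < 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / n)
    spark.stop()
  }
}
```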

14/11/19 17:08:04 INFO Executor: Running task 2.0 in stage 0.0 (TID 2)
14/11/19 17:08:04 INFO Executor: Running task 3.0 in stage 0.0 (TID 3)
14/11/19 17:08:04 INFO Executor: Running task 1.0 in stage 0.0 (TID 1)
14/11/19 17:08:04 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
14/11/19 17:08:04 INFO Executor: Running task 5.0 in stage 0.0 (TID 5)
14/11/19 17:08:04 INFO Executor: Running task 6.0 in stage 0.0 (TID 6)
14/11/19 17:08:04 INFO Executor: Running task 4.0 in stage 0.0 (TID 4)
14/11/19 17:08:04 INFO Executor: Running task 7.0 in stage 0.0 (TID 7)
14/11/19 17:08:04 INFO Executor: Fetching http://192.168.1.80:57278/jars/spark-examples-1.1.0-hadoop2.4.0.jar with timestamp 1416388083980
14/11/19 17:08:04 INFO Utils: Fetching http://192.168.1.80:57278/jars/spark-examples-1.1.0-hadoop2.4.0.jar to /var/folders/6k/nww6s1p52yg424zdcckvpwvc0000gn/T/fetchFileTemp6287870778953166340.tmp
14/11/19 17:09:04 INFO Executor: Fetching http://192.168.1.80:57278/jars/spark-examples-1.1.0-hadoop2.4.0.jar with timestamp 1416388083980
14/11/19 17:09:04 INFO Utils: Fetching http://192.168.1.80:57278/jars/spark-examples-1.1.0-hadoop2.4.0.jar to /var/folders/6k/nww6s1p52yg424zdcckvpwvc0000gn/T/fetchFileTemp6122384738311225749.tmp
**14/11/19 17:09:04 ERROR Executor: Exception in task 1.0 in stage 0.0 (TID 1)
java.net.SocketTimeoutException: Read timed out**
    at java.net.SocketInputStream.socketRead0(Native Method)
    at java.net.SocketInputStream.read(SocketInputStream.java:150)
    at java.net.SocketInputStream.read(SocketInputStream.java:121)
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:246)
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:286)
    at java.io.BufferedInputStream.read(BufferedInputStream.java:345)
    at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:703)
    at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:647)
    at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1534)
    at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1439)
    at org.apache.spark.util.Utils$.fetchFile(Utils.scala:376)
    at org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:325)
    at org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:323)
    at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
    at scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
    at scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
    at scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:226)
    at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:39)
    at scala.collection.mutable.HashMap.foreach(HashMap.scala:98)
    at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
    at org.apache.spark.executor.Executor.org$apache$spark$executor$Executor$$updateDependencies(Executor.scala:323)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:158)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
14/11/19 17:09:04 INFO TaskSetManager: Starting task 8.0 in stage 0.0 (TID 8, localhost, PROCESS_LOCAL, 1229 bytes)
**14/11/19 17:09:04 INFO Executor: Running task 8.0 in stage 0.0 (TID 8)
14/11/19 17:09:04 WARN TaskSetManager: Lost task 1.0 in stage 0.0 (TID 1, localhost): java.net.SocketTimeoutException: Read timed out**
        java.net.SocketInputStream.socketRead0(Native Method)
        java.net.SocketInputStream.read(SocketInputStream.java:150)
        java.net.SocketInputStream.read(SocketInputStream.java:121)
        java.io.BufferedInputStream.fill(BufferedInputStream.java:246)
        java.io.BufferedInputStream.read1(BufferedInputStream.java:286)
        java.io.BufferedInputStream.read(BufferedInputStream.java:345)
        sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:703)
        sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:647)
        sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1534)
        sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1439)
        org.apache.spark.util.Utils$.fetchFile(Utils.scala:376)
        org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:325)
        org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:323)
        scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
        scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
        scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
        scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:226)
        scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:39)
        scala.collection.mutable.HashMap.foreach(HashMap.scala:98)
        scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
        org.apache.spark.executor.Executor.org$apache$spark$executor$Executor$$updateDependencies(Executor.scala:323)
        org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:158)
        java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        java.lang.Thread.run(Thread.java:745)
14/11/19 17:09:04 ERROR TaskSetManager: Task 1 in stage 0.0 failed 1 times; aborting job
14/11/19 17:09:04 INFO TaskSchedulerImpl: Cancelling stage 0
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 0.0 in stage 0.0 (TID 0)
14/11/19 17:09:04 INFO TaskSchedulerImpl: Stage 0 was cancelled
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 5.0 in stage 0.0 (TID 5)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 2.0 in stage 0.0 (TID 2)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 6.0 in stage 0.0 (TID 6)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 3.0 in stage 0.0 (TID 3)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 7.0 in stage 0.0 (TID 7)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 4.0 in stage 0.0 (TID 4)
14/11/19 17:09:04 INFO Executor: Executor is trying to kill task 8.0 in stage 0.0 (TID 8)
14/11/19 17:09:04 INFO DAGScheduler: Failed to run reduce at SparkPi.scala:35
**Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 1 times, most recent failure: Lost task 1.0 in stage 0.0 (TID 1, localhost): java.net.SocketTimeoutException: Read timed out**
        java.net.SocketInputStream.socketRead0(Native Method)
        java.net.SocketInputStream.read(SocketInputStream.java:150)
        java.net.SocketInputStream.read(SocketInputStream.java:121)
        java.io.BufferedInputStream.fill(BufferedInputStream.java:246)
        java.io.BufferedInputStream.read1(BufferedInputStream.java:286)
        java.io.BufferedInputStream.read(BufferedInputStream.java:345)
        sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:703)
        sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:647)
        sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1534)
        sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1439)
        org.apache.spark.util.Utils$.fetchFile(Utils.scala:376)
        org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:325)
        org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$updateDependencies$6.apply(Executor.scala:323)
        scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
        scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
        scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:98)
        scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:226)
        scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:39)
        scala.collection.mutable.HashMap.foreach(HashMap.scala:98)
        scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
        org.apache.spark.executor.Executor.org$apache$spark$executor$Executor$$updateDependencies(Executor.scala:323)
        org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:158)
        java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1185)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1174)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1173)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1173)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:688)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:688)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:688)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1391)
    at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
    at akka.actor.ActorCell.invoke(ActorCell.scala:456)
    at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
    at akka.dispatch.Mailbox.run(Mailbox.scala:219)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
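
Judging from the log, the actual failure is the executor timing out (after about 60 seconds) while fetching the application jar from the driver's embedded HTTP file server at `http://192.168.1.80:57278/...`. Below is a small standalone fetch test I could run while the job is up, to check whether that URL is reachable at all from this machine; this is a hypothetical diagnostic of mine, with the URL and timeout taken from the log above:

```scala
import java.net.URL

// Standalone check: can we read the jar from the driver's file server?
// URL copied from the log; 5 s timeouts instead of the executor's ~60 s.
object FetchCheck {
  def main(args: Array[String]): Unit = {
    val conn = new URL(
      "http://192.168.1.80:57278/jars/spark-examples-1.1.0-hadoop2.4.0.jar"
    ).openConnection()
    conn.setConnectTimeout(5000)
    conn.setReadTimeout(5000)
    val in = conn.getInputStream()
    try {
      // Drain the stream and report how many bytes came back.
      var total = 0L
      val buf = new Array[Byte](8192)
      var n = in.read(buf)
      while (n >= 0) { total += n; n = in.read(buf) }
      println(s"Fetched $total bytes")
    } finally in.close()
  }
}
```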

I googled this but didn't find any hints. Can anyone help? Thanks :)

Thanks, 超

0 Answers
