您的位置:首页 > 其它

关于spark-submit 使用yarn-client客户端提交spark任务的问题

2017-07-13 16:59 567 查看
rpc连接超时

17/07/13 16:18:48 WARN NettyRpcEndpointRef: Error sending message [message = AMRegisted(enjoyor4,container_e02_1499931395900_0009_01_000001,26009,admin)] in 1 attempts

org.apache.spark.rpc.RpcTimeoutException: Cannot receive any reply in 120 seconds. This timeout is controlled by spark.rpc.askTimeout

        at org.apache.spark.rpc.RpcTimeout.org$apache$spark$rpc$RpcTimeout$$createRpcTimeoutException(RpcTimeout.scala:48)

        at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:63)

        at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)

        at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:33)

        at scala.util.Failure$$anonfun$recover$1.apply(Try.scala:185)

        at scala.util.Try$.apply(Try.scala:161)

        at scala.util.Failure.recover(Try.scala:185)

        at scala.concurrent.Future$$anonfun$recover$1.apply(Future.scala:324)

        at scala.concurrent.Future$$anonfun$recover$1.apply(Future.scala:324)

        at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)

        at org.spark-project.guava.util.concurrent.MoreExecutors$SameThreadExecutorService.execute(MoreExecutors.java:293)

        at scala.concurrent.impl.ExecutionContextImpl$$anon$1.execute(ExecutionContextImpl.scala:133)

        at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:40)

        at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:248)

        at scala.concurrent.Promise$class.complete(Promise.scala:55)

        at scala.concurrent.impl.Promise$DefaultPromise.complete(Promise.scala:153)

        at scala.concurrent.Future$$anonfun$map$1.apply(Future.scala:235)

        at scala.concurrent.Future$$anonfun$map$1.apply(Future.scala:235)

        at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.processBatch$1(Future.scala:643)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply$mcV$sp(Future.scala:658)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply(Future.scala:635)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply(Future.scala:635)

        at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:72)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch.run(Future.scala:634)

        at scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)

        at scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:685)

        at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:40)

        at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:248)

        at scala.concurrent.Promise$class.tryFailure(Promise.scala:115)

        at scala.concurrent.impl.Promise$DefaultPromise.tryFailure(Promise.scala:153)

        at org.apache.spark.rpc.netty.NettyRpcEnv$$anon$1.run(NettyRpcEnv.scala:225)

        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)

        at java.util.concurrent.FutureTask.run(FutureTask.java:266)

        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)

        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)

        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)

        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)

        at java.lang.Thread.run(Thread.java:745)

Caused by: java.util.concurrent.TimeoutException: Cannot receive any reply in 120 seconds

        at org.apache.spark.rpc.netty.NettyRpcEnv$$anon$1.run(NettyRpcEnv.scala:226)

        ... 7 more

17/07/13 16:18:49 WARN NettyRpcEndpointRef: Error sending message [message = RegisterBlockManager(BlockManagerId(driver, 192.168.56.119, 23159),280248975,NettyRpcEndpointRef(spark://BlockManagerEndpoint1@192.168.56.119:23403))] in 1 attempts

org.apache.spark.rpc.RpcTimeoutException: Cannot receive any reply in 120 seconds. This timeout is controlled by spark.rpc.askTimeout

        at org.apache.spark.rpc.RpcTimeout.org$apache$spark$rpc$RpcTimeout$$createRpcTimeoutException(RpcTimeout.scala:48)

        at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:63)

        at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)

        at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:33)

        at scala.util.Failure$$anonfun$recover$1.apply(Try.scala:185)

        at scala.util.Try$.apply(Try.scala:161)

        at scala.util.Failure.recover(Try.scala:185)

        at scala.concurrent.Future$$anonfun$recover$1.apply(Future.scala:324)

        at scala.concurrent.Future$$anonfun$recover$1.apply(Future.scala:324)

        at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)

        at org.spark-project.guava.util.concurrent.MoreExecutors$SameThreadExecutorService.execute(MoreExecutors.java:293)

        at scala.concurrent.impl.ExecutionContextImpl$$anon$1.execute(ExecutionContextImpl.scala:133)

        at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:40)

        at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:248)

        at scala.concurrent.Promise$class.complete(Promise.scala:55)

        at scala.concurrent.impl.Promise$DefaultPromise.complete(Promise.scala:153)

        at scala.concurrent.Future$$anonfun$map$1.apply(Future.scala:235)

        at scala.concurrent.Future$$anonfun$map$1.apply(Future.scala:235)

        at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.processBatch$1(Future.scala:643)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply$mcV$sp(Future.scala:658)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply(Future.scala:635)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch$$anonfun$run$1.apply(Future.scala:635)

        at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:72)

        at scala.concurrent.Future$InternalCallbackExecutor$Batch.run(Future.scala:634)

        at scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)

        at scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:685)

        at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:40)

        at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:248)

        at scala.concurrent.Promise$class.tryFailure(Promise.scala:115)

        at scala.concurrent.impl.Promise$DefaultPromise.tryFailure(Promise.scala:153)

        at org.apache.spark.rpc.netty.NettyRpcEnv$$anon$1.run(NettyRpcEnv.scala:225)

        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)

        at java.util.concurrent.FutureTask.run(FutureTask.java:266)

        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)

        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)

        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)

        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)

        at java.lang.Thread.run(Thread.java:745)

Caused by: java.util.concurrent.TimeoutException: Cannot receive any reply in 120 seconds

        at org.apache.spark.rpc.netty.NettyRpcEnv$$anon$1.run(NettyRpcEnv.scala:226)
        ... 7 more

解决:

首先查看你的客户机(虚拟机)是否只有单核 CPU:此问题很可能是由虚拟机硬件配置(CPU 核数不足)引起的。

一:将虚拟机的处理器配置改为多核。

二:在 Spark 客户端 conf 目录下的 spark-defaults.conf 配置文件中加上参数 spark.rpc.netty.dispatcher.numThreads=2 即可解决。原因是 Spark 的 Netty RPC 调度线程数默认依据可用 CPU 核数计算,单核环境下可能只有 1 个调度线程,导致 RPC 消息得不到及时处理而超时;显式设置为 2 可避免该问题。
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: