-
-
Save YuvalItzchakov/cdbdd7f67604557fccfbcce673c49e5d to your computer and use it in GitHub Desktop.
Kafka Consumer Leak — file-descriptor dump showing leaked NIO selectors, each opened by a KafkaConsumer created through Spark's KafkaDataConsumer.acquire and never closed.
This file has been truncated, but you can view the full file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
351 selector file descriptors are open; the allocation stack trace for each is listed below.
#1 selector by thread:Executor task launch worker for task 6658 on Wed Aug 01 06:18:34 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#2 selector by thread:Executor task launch worker for task 6755 on Wed Aug 01 06:18:44 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#3 selector by thread:Executor task launch worker for task 6803 on Wed Aug 01 06:18:45 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#4 selector by thread:Executor task launch worker for task 7057 on Wed Aug 01 06:18:55 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#5 selector by thread:Executor task launch worker for task 7073 on Wed Aug 01 06:18:56 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#6 selector by thread:Executor task launch worker for task 7104 on Wed Aug 01 06:18:57 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#7 selector by thread:Executor task launch worker for task 7346 on Wed Aug 01 06:19:06 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#8 selector by thread:Executor task launch worker for task 7395 on Wed Aug 01 06:19:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#9 selector by thread:Executor task launch worker for task 7442 on Wed Aug 01 06:19:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#10 selector by thread:Executor task launch worker for task 7667 on Wed Aug 01 06:19:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#11 selector by thread:Executor task launch worker for task 7888 on Wed Aug 01 06:19:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#12 selector by thread:Executor task launch worker for task 7946 on Wed Aug 01 06:19:27 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#13 selector by thread:Executor task launch worker for task 8042 on Wed Aug 01 06:19:31 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#14 selector by thread:Executor task launch worker for task 8258 on Wed Aug 01 06:19:39 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#15 selector by thread:Executor task launch worker for task 8529 on Wed Aug 01 06:19:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#16 selector by thread:Executor task launch worker for task 9497 on Wed Aug 01 06:20:26 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#17 selector by thread:Executor task launch worker for task 11554 on Wed Aug 01 06:22:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#18 selector by thread:Executor task launch worker for task 12736 on Wed Aug 01 06:22:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#19 selector by thread:Executor task launch worker for task 14490 on Wed Aug 01 06:24:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#20 selector by thread:Executor task launch worker for task 15803 on Wed Aug 01 06:25:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#21 selector by thread:Executor task launch worker for task 19811 on Wed Aug 01 06:28:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#22 selector by thread:Executor task launch worker for task 24402 on Wed Aug 01 06:31:43 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#23 selector by thread:Executor task launch worker for task 27323 on Wed Aug 01 06:33:33 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#24 selector by thread:Executor task launch worker for task 28106 on Wed Aug 01 06:34:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#25 selector by thread:Executor task launch worker for task 28136 on Wed Aug 01 06:34:33 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#26 selector by thread:Executor task launch worker for task 28299 on Wed Aug 01 06:34:40 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#27 selector by thread:Executor task launch worker for task 28355 on Wed Aug 01 06:34:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#28 selector by thread:Executor task launch worker for task 28651 on Wed Aug 01 06:34:52 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#29 selector by thread:Executor task launch worker for task 28656 on Wed Aug 01 06:34:52 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#30 selector by thread:Executor task launch worker for task 28896 on Wed Aug 01 06:35:02 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#31 selector by thread:Executor task launch worker for task 28907 on Wed Aug 01 06:35:02 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#32 selector by thread:Executor task launch worker for task 28913 on Wed Aug 01 06:35:02 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#33 selector by thread:Executor task launch worker for task 29099 on Wed Aug 01 06:35:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#34 selector by thread:Executor task launch worker for task 29186 on Wed Aug 01 06:35:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#35 selector by thread:Executor task launch worker for task 29195 on Wed Aug 01 06:35:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#36 selector by thread:Executor task launch worker for task 29451 on Wed Aug 01 06:35:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#37 selector by thread:Executor task launch worker for task 29459 on Wed Aug 01 06:35:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#38 selector by thread:Executor task launch worker for task 29650 on Wed Aug 01 06:35:30 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#39 selector by thread:Executor task launch worker for task 29712 on Wed Aug 01 06:35:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#40 selector by thread:Executor task launch worker for task 29738 on Wed Aug 01 06:35:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#41 selector by thread:Executor task launch worker for task 29962 on Wed Aug 01 06:35:40 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#42 selector by thread:Executor task launch worker for task 30761 on Wed Aug 01 06:36:11 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#43 selector by thread:Executor task launch worker for task 31435 on Wed Aug 01 06:36:36 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#44 selector by thread:Executor task launch worker for task 32185 on Wed Aug 01 06:37:33 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#45 selector by thread:Executor task launch worker for task 36059 on Wed Aug 01 06:40:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#46 selector by thread:Executor task launch worker for task 36282 on Wed Aug 01 06:40:33 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#47 selector by thread:Executor task launch worker for task 36538 on Wed Aug 01 06:40:41 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#48 selector by thread:Executor task launch worker for task 36546 on Wed Aug 01 06:40:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#49 selector by thread:Executor task launch worker for task 39731 on Wed Aug 01 06:43:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#50 selector by thread:Executor task launch worker for task 45123 on Wed Aug 01 06:47:03 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#51 selector by thread:Executor task launch worker for task 45664 on Wed Aug 01 06:47:23 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#52 selector by thread:Executor task launch worker for task 47050 on Wed Aug 01 06:48:40 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#53 selector by thread:Executor task launch worker for task 47240 on Wed Aug 01 06:48:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#54 selector by thread:Executor task launch worker for task 47451 on Wed Aug 01 06:49:02 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#55 selector by thread:Executor task launch worker for task 50728 on Wed Aug 01 06:51:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#56 selector by thread:Executor task launch worker for task 54656 on Wed Aug 01 06:54:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#57 selector by thread:Executor task launch worker for task 55731 on Wed Aug 01 06:55:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#58 selector by thread:Executor task launch worker for task 57634 on Wed Aug 01 06:56:48 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#59 selector by thread:Executor task launch worker for task 57642 on Wed Aug 01 06:56:48 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#60 selector by thread:Executor task launch worker for task 58635 on Wed Aug 01 06:57:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#61 selector by thread:Executor task launch worker for task 60771 on Wed Aug 01 06:59:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#62 selector by thread:Executor task launch worker for task 62370 on Wed Aug 01 07:00:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#63 selector by thread:Executor task launch worker for task 66593 on Wed Aug 01 07:03:23 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#64 selector by thread:Executor task launch worker for task 66993 on Wed Aug 01 07:04:04 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#65 selector by thread:Executor task launch worker for task 67882 on Wed Aug 01 07:04:39 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#66 selector by thread:Executor task launch worker for task 71315 on Wed Aug 01 07:07:23 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#67 selector by thread:Executor task launch worker for task 72002 on Wed Aug 01 07:07:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#68 selector by thread:Executor task launch worker for task 73025 on Wed Aug 01 07:08:55 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#69 selector by thread:Executor task launch worker for task 74883 on Wed Aug 01 07:10:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#70 selector by thread:Executor task launch worker for task 76400 on Wed Aug 01 07:11:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#71 selector by thread:Executor task launch worker for task 78947 on Wed Aug 01 07:13:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#72 selector by thread:Executor task launch worker for task 78979 on Wed Aug 01 07:13:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#73 selector by thread:Executor task launch worker for task 81625 on Wed Aug 01 07:16:05 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#74 selector by thread:Executor task launch worker for task 81921 on Wed Aug 01 07:16:20 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#75 selector by thread:Executor task launch worker for task 84145 on Wed Aug 01 07:17:44 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#76 selector by thread:Executor task launch worker for task 84600 on Wed Aug 01 07:18:27 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#77 selector by thread:Executor task launch worker for task 84720 on Wed Aug 01 07:18:36 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#78 selector by thread:Executor task launch worker for task 84810 on Wed Aug 01 07:18:39 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#79 selector by thread:Executor task launch worker for task 84976 on Wed Aug 01 07:18:45 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#80 selector by thread:Executor task launch worker for task 85354 on Wed Aug 01 07:18:59 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#81 selector by thread:Executor task launch worker for task 87330 on Wed Aug 01 07:20:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#82 selector by thread:Executor task launch worker for task 87336 on Wed Aug 01 07:20:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#83 selector by thread:Executor task launch worker for task 87643 on Wed Aug 01 07:20:57 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#84 selector by thread:Executor task launch worker for task 87915 on Wed Aug 01 07:21:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#85 selector by thread:Executor task launch worker for task 88291 on Wed Aug 01 07:21:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#86 selector by thread:Executor task launch worker for task 88714 on Wed Aug 01 07:21:37 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#87 selector by thread:Executor task launch worker for task 89225 on Wed Aug 01 07:21:57 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#88 selector by thread:Executor task launch worker for task 91970 on Wed Aug 01 07:24:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#89 selector by thread:Executor task launch worker for task 91995 on Wed Aug 01 07:24:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#90 selector by thread:Executor task launch worker for task 92787 on Wed Aug 01 07:24:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#91 selector by thread:Executor task launch worker for task 92818 on Wed Aug 01 07:25:24 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#92 selector by thread:Executor task launch worker for task 93650 on Wed Aug 01 07:25:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#93 selector by thread:Executor task launch worker for task 94722 on Wed Aug 01 07:26:40 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#94 selector by thread:Executor task launch worker for task 94937 on Wed Aug 01 07:26:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#95 selector by thread:Executor task launch worker for task 96264 on Wed Aug 01 07:28:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#96 selector by thread:Executor task launch worker for task 100257 on Wed Aug 01 07:31:57 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#97 selector by thread:Executor task launch worker for task 104514 on Wed Aug 01 07:36:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#98 selector by thread:Executor task launch worker for task 108856 on Wed Aug 01 07:40:24 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#99 selector by thread:Executor task launch worker for task 109137 on Wed Aug 01 07:40:43 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#100 selector by thread:Executor task launch worker for task 109400 on Wed Aug 01 07:40:53 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#101 selector by thread:Executor task launch worker for task 111090 on Wed Aug 01 07:41:59 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#102 selector by thread:Executor task launch worker for task 111480 on Wed Aug 01 07:42:44 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#103 selector by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:42:59 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:278) | |
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:188) | |
at com.clicktale.apps.writer.KafkaWriter.<init>(KafkaWriter.scala:19) | |
at com.clicktale.apps.writer.WriterProvider$.<init>(WriterProvider.scala:20) | |
at com.clicktale.apps.writer.WriterProvider$.<clinit>(WriterProvider.scala:16) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:25) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#104 /usr/lib/hadoop/lib/aws-java-sdk-bundle-1.11.134.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:42:59 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.net.www.protocol.jar.URLJarFile.<init>(URLJarFile.java:93) | |
at sun.net.www.protocol.jar.URLJarFile.getJarFile(URLJarFile.java:69) | |
at sun.net.www.protocol.jar.JarFileFactory.get(JarFileFactory.java:84) | |
at sun.net.www.protocol.jar.JarURLConnection.connect(JarURLConnection.java:122) | |
at sun.net.www.protocol.jar.JarURLConnection.getInputStream(JarURLConnection.java:150) | |
at java.net.URLClassLoader.getResourceAsStream(URLClassLoader.java:238) | |
at com.amazonaws.partitions.PartitionsLoader.build(PartitionsLoader.java:80) | |
at com.amazonaws.regions.RegionMetadataFactory.create(RegionMetadataFactory.java:30) | |
at com.amazonaws.regions.RegionUtils.initialize(RegionUtils.java:64) | |
at com.amazonaws.regions.RegionUtils.getRegionMetadata(RegionUtils.java:52) | |
at com.amazonaws.regions.RegionUtils.getRegion(RegionUtils.java:105) | |
at com.amazonaws.client.builder.AwsClientBuilder.withRegion(AwsClientBuilder.java:239) | |
at com.clicktale.apps.writer.S3Writer.buildClient(S3Writer.scala:54) | |
at com.clicktale.apps.writer.S3Writer.<init>(S3Writer.scala:22) | |
at com.clicktale.apps.writer.WriterProvider$.<init>(WriterProvider.scala:26) | |
at com.clicktale.apps.writer.WriterProvider$.<clinit>(WriterProvider.scala:16) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:25) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#105 /usr/java/jdk1.8.0_112/jre/lib/ext/sunec.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:42:59 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.misc.URLClassPath$JarLoader.getJarFile(URLClassPath.java:893) | |
at sun.misc.URLClassPath$JarLoader.access$700(URLClassPath.java:756) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:838) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:831) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.misc.URLClassPath$JarLoader.ensureOpen(URLClassPath.java:830) | |
at sun.misc.URLClassPath$JarLoader.getResource(URLClassPath.java:1001) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:212) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:365) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:411) | |
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at sun.security.jca.ProviderConfig$2.run(ProviderConfig.java:215) | |
at sun.security.jca.ProviderConfig$2.run(ProviderConfig.java:206) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.security.jca.ProviderConfig.doLoadProvider(ProviderConfig.java:206) | |
at sun.security.jca.ProviderConfig.getProvider(ProviderConfig.java:187) | |
at sun.security.jca.ProviderList.getProvider(ProviderList.java:233) | |
at sun.security.jca.ProviderList.getService(ProviderList.java:331) | |
at sun.security.jca.GetInstance.getInstance(GetInstance.java:157) | |
at javax.net.ssl.SSLContext.getInstance(SSLContext.java:156) | |
at com.amazonaws.internal.SdkSSLContext.getPreferredSSLContext(SdkSSLContext.java:32) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.getPreferredSocketFactory(ApacheConnectionManagerFactory.java:91) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.create(ApacheConnectionManagerFactory.java:65) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.create(ApacheConnectionManagerFactory.java:58) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:50) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:38) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:314) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:298) | |
at com.amazonaws.AmazonWebServiceClient.<init>(AmazonWebServiceClient.java:173) | |
at com.amazonaws.services.s3.AmazonS3Client.<init>(AmazonS3Client.java:627) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:35) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:32) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:64) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:28) | |
at com.amazonaws.client.builder.AwsSyncClientBuilder.build(AwsSyncClientBuilder.java:46) | |
at com.clicktale.apps.writer.S3Writer.buildClient(S3Writer.scala:56) | |
at com.clicktale.apps.writer.S3Writer.<init>(S3Writer.scala:22) | |
at com.clicktale.apps.writer.WriterProvider$.<init>(WriterProvider.scala:26) | |
at com.clicktale.apps.writer.WriterProvider$.<clinit>(WriterProvider.scala:16) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:25) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#106 /usr/java/jdk1.8.0_112/jre/lib/jce.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:42:59 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.misc.URLClassPath$JarLoader.getJarFile(URLClassPath.java:893) | |
at sun.misc.URLClassPath$JarLoader.access$700(URLClassPath.java:756) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:838) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:831) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.misc.URLClassPath$JarLoader.ensureOpen(URLClassPath.java:830) | |
at sun.misc.URLClassPath$JarLoader.getResource(URLClassPath.java:1001) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:212) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:265) | |
at java.lang.ClassLoader.getBootstrapResource(ClassLoader.java:1264) | |
at java.lang.ClassLoader.getResource(ClassLoader.java:1093) | |
at java.lang.ClassLoader.getResource(ClassLoader.java:1091) | |
at java.lang.ClassLoader.getSystemResource(ClassLoader.java:1226) | |
at javax.crypto.JceSecurity.setupJurisdictionPolicies(JceSecurity.java:251) | |
at javax.crypto.JceSecurity.access$000(JceSecurity.java:48) | |
at javax.crypto.JceSecurity$1.run(JceSecurity.java:80) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at javax.crypto.JceSecurity.<clinit>(JceSecurity.java:77) | |
at javax.crypto.JceSecurityManager.<clinit>(JceSecurityManager.java:65) | |
at javax.crypto.Cipher.getConfiguredPermission(Cipher.java:2587) | |
at javax.crypto.Cipher.getMaxAllowedKeyLength(Cipher.java:2611) | |
at sun.security.ssl.CipherSuite$BulkCipher.isUnlimited(CipherSuite.java:533) | |
at sun.security.ssl.CipherSuite$BulkCipher.<init>(CipherSuite.java:505) | |
at sun.security.ssl.CipherSuite.<clinit>(CipherSuite.java:612) | |
at sun.security.ssl.SSLContextImpl.getApplicableCipherSuiteList(SSLContextImpl.java:293) | |
at sun.security.ssl.SSLContextImpl.access$100(SSLContextImpl.java:41) | |
at sun.security.ssl.SSLContextImpl$AbstractTLSContext.<clinit>(SSLContextImpl.java:424) | |
at java.lang.Class.forName0(Native Method) | |
at java.lang.Class.forName(Class.java:264) | |
at java.security.Provider$Service.getImplClass(Provider.java:1634) | |
at java.security.Provider$Service.newInstance(Provider.java:1592) | |
at sun.security.jca.GetInstance.getInstance(GetInstance.java:236) | |
at sun.security.jca.GetInstance.getInstance(GetInstance.java:164) | |
at javax.net.ssl.SSLContext.getInstance(SSLContext.java:156) | |
at com.amazonaws.internal.SdkSSLContext.getPreferredSSLContext(SdkSSLContext.java:32) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.getPreferredSocketFactory(ApacheConnectionManagerFactory.java:91) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.create(ApacheConnectionManagerFactory.java:65) | |
at com.amazonaws.http.apache.client.impl.ApacheConnectionManagerFactory.create(ApacheConnectionManagerFactory.java:58) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:50) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:38) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:314) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:298) | |
at com.amazonaws.AmazonWebServiceClient.<init>(AmazonWebServiceClient.java:173) | |
at com.amazonaws.services.s3.AmazonS3Client.<init>(AmazonS3Client.java:627) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:35) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:32) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:64) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:28) | |
at com.amazonaws.client.builder.AwsSyncClientBuilder.build(AwsSyncClientBuilder.java:46) | |
at com.clicktale.apps.writer.S3Writer.buildClient(S3Writer.scala:56) | |
at com.clicktale.apps.writer.S3Writer.<init>(S3Writer.scala:22) | |
at com.clicktale.apps.writer.WriterProvider$.<init>(WriterProvider.scala:26) | |
at com.clicktale.apps.writer.WriterProvider$.<clinit>(WriterProvider.scala:16) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:25) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#107 /usr/lib/spark/jars/httpclient-4.5.4.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:42:59 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.net.www.protocol.jar.URLJarFile.<init>(URLJarFile.java:93) | |
at sun.net.www.protocol.jar.URLJarFile.getJarFile(URLJarFile.java:69) | |
at sun.net.www.protocol.jar.JarFileFactory.get(JarFileFactory.java:84) | |
at sun.net.www.protocol.jar.JarURLConnection.connect(JarURLConnection.java:122) | |
at sun.net.www.protocol.jar.JarURLConnection.getInputStream(JarURLConnection.java:150) | |
at java.net.URL.openStream(URL.java:1045) | |
at com.amazonaws.thirdparty.apache.http.conn.util.PublicSuffixMatcherLoader.load(PublicSuffixMatcherLoader.java:60) | |
at com.amazonaws.thirdparty.apache.http.conn.util.PublicSuffixMatcherLoader.getDefault(PublicSuffixMatcherLoader.java:88) | |
at com.amazonaws.thirdparty.apache.http.impl.client.HttpClientBuilder.build(HttpClientBuilder.java:938) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:77) | |
at com.amazonaws.http.apache.client.impl.ApacheHttpClientFactory.create(ApacheHttpClientFactory.java:38) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:314) | |
at com.amazonaws.http.AmazonHttpClient.<init>(AmazonHttpClient.java:298) | |
at com.amazonaws.AmazonWebServiceClient.<init>(AmazonWebServiceClient.java:173) | |
at com.amazonaws.services.s3.AmazonS3Client.<init>(AmazonS3Client.java:627) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:35) | |
at com.amazonaws.services.s3.AmazonS3Builder$1.apply(AmazonS3Builder.java:32) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:64) | |
at com.amazonaws.services.s3.AmazonS3ClientBuilder.build(AmazonS3ClientBuilder.java:28) | |
at com.amazonaws.client.builder.AwsSyncClientBuilder.build(AwsSyncClientBuilder.java:46) | |
at com.clicktale.apps.writer.S3Writer.buildClient(S3Writer.scala:56) | |
at com.clicktale.apps.writer.S3Writer.<init>(S3Writer.scala:22) | |
at com.clicktale.apps.writer.WriterProvider$.<init>(WriterProvider.scala:26) | |
at com.clicktale.apps.writer.WriterProvider$.<clinit>(WriterProvider.scala:16) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:25) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#108 /usr/java/jdk1.8.0_112/jre/lib/ext/sunjce_provider.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:43:01 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.misc.URLClassPath$JarLoader.getJarFile(URLClassPath.java:893) | |
at sun.misc.URLClassPath$JarLoader.access$700(URLClassPath.java:756) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:838) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:831) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.misc.URLClassPath$JarLoader.ensureOpen(URLClassPath.java:830) | |
at sun.misc.URLClassPath$JarLoader.getResource(URLClassPath.java:1001) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:212) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:365) | |
at java.net.URLClassLoader$1.run(URLClassLoader.java:362) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at java.net.URLClassLoader.findClass(URLClassLoader.java:361) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:424) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:411) | |
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331) | |
at java.lang.ClassLoader.loadClass(ClassLoader.java:357) | |
at sun.security.jca.ProviderConfig$2.run(ProviderConfig.java:215) | |
at sun.security.jca.ProviderConfig$2.run(ProviderConfig.java:206) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.security.jca.ProviderConfig.doLoadProvider(ProviderConfig.java:206) | |
at sun.security.jca.ProviderConfig.getProvider(ProviderConfig.java:187) | |
at sun.security.jca.ProviderList.getProvider(ProviderList.java:233) | |
at sun.security.jca.ProviderList$ServiceList.tryGet(ProviderList.java:434) | |
at sun.security.jca.ProviderList$ServiceList.access$200(ProviderList.java:376) | |
at sun.security.jca.ProviderList$ServiceList$1.hasNext(ProviderList.java:486) | |
at javax.crypto.Mac.getInstance(Mac.java:174) | |
at com.amazonaws.auth.SigningAlgorithm$1.initialValue(SigningAlgorithm.java:35) | |
at com.amazonaws.auth.SigningAlgorithm$1.initialValue(SigningAlgorithm.java:31) | |
at java.lang.ThreadLocal.setInitialValue(ThreadLocal.java:180) | |
at java.lang.ThreadLocal.get(ThreadLocal.java:170) | |
at com.amazonaws.auth.SigningAlgorithm.getMac(SigningAlgorithm.java:49) | |
at com.amazonaws.auth.AbstractAWSSigner.sign(AbstractAWSSigner.java:120) | |
at com.amazonaws.auth.AbstractAWSSigner.sign(AbstractAWSSigner.java:99) | |
at com.amazonaws.auth.AWS4Signer.newSigningKey(AWS4Signer.java:607) | |
at com.amazonaws.auth.AWS4Signer.deriveSigningKey(AWS4Signer.java:376) | |
at com.amazonaws.auth.AWS4Signer.sign(AWS4Signer.java:225) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1164) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649) | |
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168) | |
at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1718) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply$mcV$sp(S3Writer.scala:44) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at scala.util.Try$.apply(Try.scala:192) | |
at com.clicktale.apps.writer.S3Writer.com$clicktale$apps$writer$S3Writer$$writeToS3(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:29) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:28) | |
at scala.util.Success.flatMap(Try.scala:231) | |
at com.clicktale.apps.writer.S3Writer.write(S3Writer.scala:28) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:59) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#109 /usr/java/jdk1.8.0_112/jre/lib/resources.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:43:01 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.misc.URLClassPath$JarLoader.getJarFile(URLClassPath.java:893) | |
at sun.misc.URLClassPath$JarLoader.access$700(URLClassPath.java:756) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:838) | |
at sun.misc.URLClassPath$JarLoader$1.run(URLClassPath.java:831) | |
at java.security.AccessController.doPrivileged(Native Method) | |
at sun.misc.URLClassPath$JarLoader.ensureOpen(URLClassPath.java:830) | |
at sun.misc.URLClassPath$JarLoader.getResource(URLClassPath.java:1001) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:212) | |
at sun.misc.URLClassPath.getResource(URLClassPath.java:265) | |
at java.lang.ClassLoader.getBootstrapResource(ClassLoader.java:1264) | |
at java.lang.ClassLoader.getResource(ClassLoader.java:1093) | |
at java.lang.ClassLoader.getResource(ClassLoader.java:1091) | |
at java.lang.ClassLoader.getSystemResource(ClassLoader.java:1226) | |
at java.lang.ClassLoader.getSystemResourceAsStream(ClassLoader.java:1329) | |
at java.lang.Class.getResourceAsStream(Class.java:2221) | |
at java.net.IDN.<clinit>(IDN.java:237) | |
at javax.net.ssl.SNIHostName.<init>(SNIHostName.java:99) | |
at sun.security.ssl.Utilities.rawToSNIHostName(Utilities.java:102) | |
at sun.security.ssl.Utilities.addToSNIServerNameList(Utilities.java:49) | |
at sun.security.ssl.SSLSocketImpl.<init>(SSLSocketImpl.java:566) | |
at sun.security.ssl.SSLSocketFactoryImpl.createSocket(SSLSocketFactoryImpl.java:110) | |
at com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory.createLayeredSocket(SSLConnectionSocketFactory.java:363) | |
at com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory.connectSocket(SSLConnectionSocketFactory.java:353) | |
at com.amazonaws.http.conn.ssl.SdkTLSSocketFactory.connectSocket(SdkTLSSocketFactory.java:132) | |
at com.amazonaws.thirdparty.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:141) | |
at com.amazonaws.thirdparty.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:353) | |
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) | |
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) | |
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) | |
at java.lang.reflect.Method.invoke(Method.java:498) | |
at com.amazonaws.http.conn.ClientConnectionManagerFactory$Handler.invoke(ClientConnectionManagerFactory.java:76) | |
at com.amazonaws.http.conn.$Proxy28.connect(Unknown Source) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:380) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:236) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:184) | |
at com.amazonaws.thirdparty.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:184) | |
at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) | |
at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55) | |
at com.amazonaws.http.apache.client.impl.SdkHttpClient.execute(SdkHttpClient.java:72) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1190) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649) | |
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168) | |
at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1718) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply$mcV$sp(S3Writer.scala:44) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at scala.util.Try$.apply(Try.scala:192) | |
at com.clicktale.apps.writer.S3Writer.com$clicktale$apps$writer$S3Writer$$writeToS3(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:29) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:28) | |
at scala.util.Success.flatMap(Try.scala:231) | |
at com.clicktale.apps.writer.S3Writer.write(S3Writer.scala:28) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:59) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#110 /usr/java/jdk1.8.0_112/jre/lib/resources.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:43:01 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.net.www.protocol.jar.URLJarFile.<init>(URLJarFile.java:93) | |
at sun.net.www.protocol.jar.URLJarFile.getJarFile(URLJarFile.java:69) | |
at sun.net.www.protocol.jar.JarFileFactory.get(JarFileFactory.java:84) | |
at sun.net.www.protocol.jar.JarURLConnection.connect(JarURLConnection.java:122) | |
at sun.net.www.protocol.jar.JarURLConnection.getInputStream(JarURLConnection.java:150) | |
at java.net.URL.openStream(URL.java:1045) | |
at java.lang.ClassLoader.getSystemResourceAsStream(ClassLoader.java:1331) | |
at java.lang.Class.getResourceAsStream(Class.java:2221) | |
at java.net.IDN.<clinit>(IDN.java:237) | |
at javax.net.ssl.SNIHostName.<init>(SNIHostName.java:99) | |
at sun.security.ssl.Utilities.rawToSNIHostName(Utilities.java:102) | |
at sun.security.ssl.Utilities.addToSNIServerNameList(Utilities.java:49) | |
at sun.security.ssl.SSLSocketImpl.<init>(SSLSocketImpl.java:566) | |
at sun.security.ssl.SSLSocketFactoryImpl.createSocket(SSLSocketFactoryImpl.java:110) | |
at com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory.createLayeredSocket(SSLConnectionSocketFactory.java:363) | |
at com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory.connectSocket(SSLConnectionSocketFactory.java:353) | |
at com.amazonaws.http.conn.ssl.SdkTLSSocketFactory.connectSocket(SdkTLSSocketFactory.java:132) | |
at com.amazonaws.thirdparty.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:141) | |
at com.amazonaws.thirdparty.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:353) | |
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) | |
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) | |
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) | |
at java.lang.reflect.Method.invoke(Method.java:498) | |
at com.amazonaws.http.conn.ClientConnectionManagerFactory$Handler.invoke(ClientConnectionManagerFactory.java:76) | |
at com.amazonaws.http.conn.$Proxy28.connect(Unknown Source) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:380) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:236) | |
at com.amazonaws.thirdparty.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:184) | |
at com.amazonaws.thirdparty.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:184) | |
at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) | |
at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55) | |
at com.amazonaws.http.apache.client.impl.SdkHttpClient.execute(SdkHttpClient.java:72) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1190) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667) | |
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649) | |
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221) | |
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168) | |
at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1718) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply$mcV$sp(S3Writer.scala:44) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at scala.util.Try$.apply(Try.scala:192) | |
at com.clicktale.apps.writer.S3Writer.com$clicktale$apps$writer$S3Writer$$writeToS3(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:29) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:28) | |
at scala.util.Success.flatMap(Try.scala:231) | |
at com.clicktale.apps.writer.S3Writer.write(S3Writer.scala:28) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:59) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#111 /usr/lib/hadoop/lib/jaxb-impl-2.2.3-1.jar by thread:Executor task launch worker for task 111774 on Wed Aug 01 07:43:01 UTC 2018 | |
at java.util.zip.ZipFile.<init>(ZipFile.java:150) | |
at java.util.jar.JarFile.<init>(JarFile.java:166) | |
at java.util.jar.JarFile.<init>(JarFile.java:103) | |
at sun.net.www.protocol.jar.URLJarFile.<init>(URLJarFile.java:93) | |
at sun.net.www.protocol.jar.URLJarFile.getJarFile(URLJarFile.java:69) | |
at sun.net.www.protocol.jar.JarFileFactory.get(JarFileFactory.java:84) | |
at sun.net.www.protocol.jar.JarURLConnection.connect(JarURLConnection.java:122) | |
at sun.net.www.protocol.jar.JarURLConnection.getInputStream(JarURLConnection.java:150) | |
at java.net.URL.openStream(URL.java:1045) | |
at javax.xml.bind.ContextFinder.find(ContextFinder.java:436) | |
at javax.xml.bind.JAXBContext.newInstance(JAXBContext.java:641) | |
at javax.xml.bind.JAXBContext.newInstance(JAXBContext.java:584) | |
at com.amazonaws.util.Base64.<clinit>(Base64.java:44) | |
at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1728) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply$mcV$sp(S3Writer.scala:44) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$com$clicktale$apps$writer$S3Writer$$writeToS3$1.apply(S3Writer.scala:34) | |
at scala.util.Try$.apply(Try.scala:192) | |
at com.clicktale.apps.writer.S3Writer.com$clicktale$apps$writer$S3Writer$$writeToS3(S3Writer.scala:34) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:29) | |
at com.clicktale.apps.writer.S3Writer$$anonfun$write$1.apply(S3Writer.scala:28) | |
at scala.util.Success.flatMap(Try.scala:231) | |
at com.clicktale.apps.writer.S3Writer.write(S3Writer.scala:28) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:59) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1$$anonfun$process$1.apply(ExtendedForeachWriter.scala:22) | |
at scala.collection.immutable.List.foreach(List.scala:381) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:22) | |
at com.clicktale.apps.ExtendedForeachWriter$$anon$1.process(ExtendedForeachWriter.scala:20) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:53) | |
at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2074) | |
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#112 selector by thread:Executor task launch worker for task 114298 on Wed Aug 01 07:46:31 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#113 selector by thread:Executor task launch worker for task 115723 on Wed Aug 01 07:48:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#114 selector by thread:Executor task launch worker for task 117065 on Wed Aug 01 07:49:53 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#115 selector by thread:Executor task launch worker for task 117177 on Wed Aug 01 07:50:26 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#116 selector by thread:Executor task launch worker for task 118881 on Wed Aug 01 07:52:29 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#117 selector by thread:Executor task launch worker for task 123818 on Wed Aug 01 07:58:39 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#118 selector by thread:Executor task launch worker for task 124811 on Wed Aug 01 07:59:59 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#119 selector by thread:Executor task launch worker for task 125792 on Wed Aug 01 08:01:04 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#120 selector by thread:Executor task launch worker for task 126546 on Wed Aug 01 08:02:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#121 selector by thread:Executor task launch worker for task 132401 on Wed Aug 01 08:09:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#122 selector by thread:Executor task launch worker for task 135066 on Wed Aug 01 08:13:47 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#123 selector by thread:Executor task launch worker for task 136337 on Wed Aug 01 08:15:33 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#124 selector by thread:Executor task launch worker for task 136386 on Wed Aug 01 08:15:35 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#125 selector by thread:Executor task launch worker for task 136482 on Wed Aug 01 08:15:38 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#126 selector by thread:Executor task launch worker for task 137825 on Wed Aug 01 08:17:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#127 selector by thread:Executor task launch worker for task 138784 on Wed Aug 01 08:18:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#128 selector by thread:Executor task launch worker for task 138809 on Wed Aug 01 08:18:41 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#129 selector by thread:Executor task launch worker for task 139051 on Wed Aug 01 08:19:04 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#130 selector by thread:Executor task launch worker for task 139891 on Wed Aug 01 08:20:11 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#131 selector by thread:Executor task launch worker for task 140962 on Wed Aug 01 08:21:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#132 selector by thread:Executor task launch worker for task 142210 on Wed Aug 01 08:23:19 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#133 selector by thread:Executor task launch worker for task 143297 on Wed Aug 01 08:24:48 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#134 selector by thread:Executor task launch worker for task 144466 on Wed Aug 01 08:26:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#135 selector by thread:Executor task launch worker for task 144674 on Wed Aug 01 08:26:34 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#136 selector by thread:Executor task launch worker for task 147915 on Wed Aug 01 08:31:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#137 selector by thread:Executor task launch worker for task 151576 on Wed Aug 01 08:36:11 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#138 selector by thread:Executor task launch worker for task 151753 on Wed Aug 01 08:36:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#139 selector by thread:Executor task launch worker for task 154211 on Wed Aug 01 08:40:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#140 selector by thread:Executor task launch worker for task 154523 on Wed Aug 01 08:40:29 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#141 selector by thread:Executor task launch worker for task 154539 on Wed Aug 01 08:40:56 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#142 selector by thread:Executor task launch worker for task 154601 on Wed Aug 01 08:41:08 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#143 selector by thread:Executor task launch worker for task 154880 on Wed Aug 01 08:41:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#144 selector by thread:Executor task launch worker for task 155649 on Wed Aug 01 08:42:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#145 selector by thread:Executor task launch worker for task 157226 on Wed Aug 01 08:44:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#146 selector by thread:Executor task launch worker for task 157970 on Wed Aug 01 08:46:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#147 selector by thread:Executor task launch worker for task 158056 on Wed Aug 01 08:46:26 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#148 selector by thread:Executor task launch worker for task 159056 on Wed Aug 01 08:48:01 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#149 selector by thread:Executor task launch worker for task 160369 on Wed Aug 01 08:50:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#150 selector by thread:Executor task launch worker for task 161242 on Wed Aug 01 08:51:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#151 selector by thread:Executor task launch worker for task 162211 on Wed Aug 01 08:52:38 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#152 selector by thread:Executor task launch worker for task 162442 on Wed Aug 01 08:52:53 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#153 selector by thread:Executor task launch worker for task 163880 on Wed Aug 01 08:54:38 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#154 selector by thread:Executor task launch worker for task 164553 on Wed Aug 01 08:55:44 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#155 selector by thread:Executor task launch worker for task 165640 on Wed Aug 01 08:58:43 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#156 selector by thread:Executor task launch worker for task 166481 on Wed Aug 01 08:59:27 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#157 selector by thread:Executor task launch worker for task 166504 on Wed Aug 01 08:59:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#158 selector by thread:Executor task launch worker for task 167018 on Wed Aug 01 09:00:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#159 selector by thread:Executor task launch worker for task 167160 on Wed Aug 01 09:01:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#160 selector by thread:Executor task launch worker for task 168090 on Wed Aug 01 09:01:52 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#161 selector by thread:Executor task launch worker for task 168824 on Wed Aug 01 09:03:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#162 selector by thread:Executor task launch worker for task 169578 on Wed Aug 01 09:04:07 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#163 selector by thread:Executor task launch worker for task 169675 on Wed Aug 01 09:04:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#164 selector by thread:Executor task launch worker for task 169841 on Wed Aug 01 09:04:57 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#165 selector by thread:Executor task launch worker for task 169864 on Wed Aug 01 09:04:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#166 selector by thread:Executor task launch worker for task 170472 on Wed Aug 01 09:05:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#167 selector by thread:Executor task launch worker for task 170578 on Wed Aug 01 09:06:03 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#168 selector by thread:Executor task launch worker for task 170720 on Wed Aug 01 09:06:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#169 selector by thread:Executor task launch worker for task 171819 on Wed Aug 01 09:08:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#170 selector by thread:Executor task launch worker for task 171944 on Wed Aug 01 09:08:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#171 selector by thread:Executor task launch worker for task 173275 on Wed Aug 01 09:10:04 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#172 selector by thread:Executor task launch worker for task 173955 on Wed Aug 01 09:11:46 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#173 selector by thread:Executor task launch worker for task 174744 on Wed Aug 01 09:12:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#174 selector by thread:Executor task launch worker for task 174763 on Wed Aug 01 09:12:44 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#175 selector by thread:Executor task launch worker for task 174888 on Wed Aug 01 09:12:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#176 selector by thread:Executor task launch worker for task 175627 on Wed Aug 01 09:14:08 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#177 selector by thread:Executor task launch worker for task 175784 on Wed Aug 01 09:14:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#178 selector by thread:Executor task launch worker for task 176176 on Wed Aug 01 09:14:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#179 selector by thread:Executor task launch worker for task 176976 on Wed Aug 01 09:16:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#180 selector by thread:Executor task launch worker for task 177328 on Wed Aug 01 09:16:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#181 selector by thread:Executor task launch worker for task 177825 on Wed Aug 01 09:17:26 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#182 selector by thread:Executor task launch worker for task 179106 on Wed Aug 01 09:20:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#183 selector by thread:Executor task launch worker for task 179176 on Wed Aug 01 09:20:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#184 selector by thread:Executor task launch worker for task 179201 on Wed Aug 01 09:20:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#185 selector by thread:Executor task launch worker for task 179728 on Wed Aug 01 09:21:23 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#186 selector by thread:Executor task launch worker for task 180083 on Wed Aug 01 09:21:46 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#187 selector by thread:Executor task launch worker for task 180290 on Wed Aug 01 09:21:55 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#188 selector by thread:Executor task launch worker for task 180307 on Wed Aug 01 09:21:56 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#189 selector by thread:Executor task launch worker for task 180376 on Wed Aug 01 09:22:30 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#190 selector by thread:Executor task launch worker for task 180881 on Wed Aug 01 09:23:26 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#191 selector by thread:Executor task launch worker for task 181498 on Wed Aug 01 09:24:00 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#192 selector by thread:Executor task launch worker for task 181521 on Wed Aug 01 09:24:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#193 selector by thread:Executor task launch worker for task 182067 on Wed Aug 01 09:25:30 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#194 selector by thread:Executor task launch worker for task 183651 on Wed Aug 01 09:27:59 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#195 selector by thread:Executor task launch worker for task 184257 on Wed Aug 01 09:29:01 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#196 selector by thread:Executor task launch worker for task 184362 on Wed Aug 01 09:29:38 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#197 selector by thread:Executor task launch worker for task 184931 on Wed Aug 01 09:29:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#198 selector by thread:Executor task launch worker for task 185721 on Wed Aug 01 09:31:39 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#199 selector by thread:Executor task launch worker for task 185818 on Wed Aug 01 09:31:42 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#200 selector by thread:Executor task launch worker for task 185937 on Wed Aug 01 09:31:46 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#201 selector by thread:Executor task launch worker for task 186011 on Wed Aug 01 09:31:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#202 selector by thread:Executor task launch worker for task 186122 on Wed Aug 01 09:31:53 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#203 selector by thread:Executor task launch worker for task 186153 on Wed Aug 01 09:31:54 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#204 selector by thread:Executor task launch worker for task 186217 on Wed Aug 01 09:32:25 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#205 selector by thread:Executor task launch worker for task 186810 on Wed Aug 01 09:33:30 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#206 selector by thread:Executor task launch worker for task 187274 on Wed Aug 01 09:33:49 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#207 selector by thread:Executor task launch worker for task 187291 on Wed Aug 01 09:33:50 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#208 selector by thread:Executor task launch worker for task 187432 on Wed Aug 01 09:34:23 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#209 selector by thread:Executor task launch worker for task 187472 on Wed Aug 01 09:34:31 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#210 selector by thread:Executor task launch worker for task 188504 on Wed Aug 01 09:36:28 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#211 selector by thread:Executor task launch worker for task 188610 on Wed Aug 01 09:36:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#212 selector by thread:Executor task launch worker for task 189451 on Wed Aug 01 09:37:41 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#213 selector by thread:Executor task launch worker for task 189457 on Wed Aug 01 09:37:41 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#214 selector by thread:Executor task launch worker for task 189472 on Wed Aug 01 09:38:08 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#215 selector by thread:Executor task launch worker for task 189523 on Wed Aug 01 09:38:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#216 selector by thread:Executor task launch worker for task 189754 on Wed Aug 01 09:38:29 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#217 selector by thread:Executor task launch worker for task 189834 on Wed Aug 01 09:38:32 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#218 selector by thread:Executor task launch worker for task 190059 on Wed Aug 01 09:38:40 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#219 selector by thread:Executor task launch worker for task 190139 on Wed Aug 01 09:39:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#220 selector by thread:Executor task launch worker for task 190635 on Wed Aug 01 09:40:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#221 selector by thread:Executor task launch worker for task 190763 on Wed Aug 01 09:40:19 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#222 selector by thread:Executor task launch worker for task 190848 on Wed Aug 01 09:40:22 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#223 selector by thread:Executor task launch worker for task 191066 on Wed Aug 01 09:40:29 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#224 selector by thread:Executor task launch worker for task 191816 on Wed Aug 01 09:42:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#225 selector by thread:Executor task launch worker for task 192272 on Wed Aug 01 09:43:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#226 selector by thread:Executor task launch worker for task 192483 on Wed Aug 01 09:43:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#227 selector by thread:Executor task launch worker for task 192617 on Wed Aug 01 09:43:58 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#228 selector by thread:Executor task launch worker for task 193482 on Wed Aug 01 09:45:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#229 selector by thread:Executor task launch worker for task 193498 on Wed Aug 01 09:45:09 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#230 selector by thread:Executor task launch worker for task 193545 on Wed Aug 01 09:45:10 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#231 selector by thread:Executor task launch worker for task 193577 on Wed Aug 01 09:45:11 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#232 selector by thread:Executor task launch worker for task 193592 on Wed Aug 01 09:45:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#233 selector by thread:Executor task launch worker for task 193627 on Wed Aug 01 09:45:12 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#234 selector by thread:Executor task launch worker for task 193642 on Wed Aug 01 09:45:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#235 selector by thread:Executor task launch worker for task 193667 on Wed Aug 01 09:45:13 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#236 selector by thread:Executor task launch worker for task 193673 on Wed Aug 01 09:45:14 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#237 selector by thread:Executor task launch worker for task 193680 on Wed Aug 01 09:45:14 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#238 selector by thread:Executor task launch worker for task 193689 on Wed Aug 01 09:45:14 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#239 selector by thread:Executor task launch worker for task 193697 on Wed Aug 01 09:45:14 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#240 selector by thread:Executor task launch worker for task 193705 on Wed Aug 01 09:45:14 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#241 selector by thread:Executor task launch worker for task 193712 on Wed Aug 01 09:45:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#242 selector by thread:Executor task launch worker for task 193722 on Wed Aug 01 09:45:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#243 selector by thread:Executor task launch worker for task 193730 on Wed Aug 01 09:45:15 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#244 selector by thread:Executor task launch worker for task 193737 on Wed Aug 01 09:45:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#245 selector by thread:Executor task launch worker for task 193746 on Wed Aug 01 09:45:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#246 selector by thread:Executor task launch worker for task 193753 on Wed Aug 01 09:45:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#247 selector by thread:Executor task launch worker for task 193760 on Wed Aug 01 09:45:16 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#248 selector by thread:Executor task launch worker for task 193770 on Wed Aug 01 09:45:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#249 selector by thread:Executor task launch worker for task 193779 on Wed Aug 01 09:45:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#250 selector by thread:Executor task launch worker for task 193785 on Wed Aug 01 09:45:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#251 selector by thread:Executor task launch worker for task 193792 on Wed Aug 01 09:45:17 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#252 selector by thread:Executor task launch worker for task 193802 on Wed Aug 01 09:45:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#253 selector by thread:Executor task launch worker for task 193811 on Wed Aug 01 09:45:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#254 selector by thread:Executor task launch worker for task 193817 on Wed Aug 01 09:45:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#255 selector by thread:Executor task launch worker for task 193826 on Wed Aug 01 09:45:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#256 selector by thread:Executor task launch worker for task 193835 on Wed Aug 01 09:45:18 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#257 selector by thread:Executor task launch worker for task 193840 on Wed Aug 01 09:45:19 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#258 selector by thread:Executor task launch worker for task 193851 on Wed Aug 01 09:45:19 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#259 selector by thread:Executor task launch worker for task 193859 on Wed Aug 01 09:45:19 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#260 selector by thread:Executor task launch worker for task 193864 on Wed Aug 01 09:45:20 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#261 selector by thread:Executor task launch worker for task 193875 on Wed Aug 01 09:45:20 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#262 selector by thread:Executor task launch worker for task 193882 on Wed Aug 01 09:45:20 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#263 selector by thread:Executor task launch worker for task 193889 on Wed Aug 01 09:45:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#264 selector by thread:Executor task launch worker for task 193898 on Wed Aug 01 09:45:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#265 selector by thread:Executor task launch worker for task 193907 on Wed Aug 01 09:45:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#266 selector by thread:Executor task launch worker for task 193913 on Wed Aug 01 09:45:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96) | |
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53) | |
at org.apache.spark.scheduler.Task.run(Task.scala:109) | |
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345) | |
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) | |
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) | |
at java.lang.Thread.run(Thread.java:745) | |
#267 selector by thread:Executor task launch worker for task 193920 on Wed Aug 01 09:45:21 UTC 2018 | |
at java.nio.channels.spi.AbstractSelector.<init>(AbstractSelector.java:86) | |
at sun.nio.ch.SelectorImpl.<init>(SelectorImpl.java:54) | |
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:64) | |
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:36) | |
at java.nio.channels.Selector.open(Selector.java:227) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:109) | |
at org.apache.kafka.common.network.Selector.<init>(Selector.java:136) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:557) | |
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:540) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.createConsumer(KafkaDataConsumer.scala:100) | |
at org.apache.spark.sql.kafka010.InternalKafkaConsumer.<init>(KafkaDataConsumer.scala:85) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$lzycompute$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.newInternalConsumer$1(KafkaDataConsumer.scala:432) | |
at org.apache.spark.sql.kafka010.KafkaDataConsumer$.acquire(KafkaDataConsumer.scala:460) | |
at org.apache.spark.sql.kafka010.KafkaSourceRDD.compute(KafkaSourceRDD.scala:129) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324) | |
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288) | |
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) | |
at org.apache.spark.rdd.RDD.computeO |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment