Created February 27, 2021 21:10
scala> dfFromData8.write.format("hudi").
     | options(getQuickstartWriteConfigs).
     | option(PRECOMBINE_FIELD_OPT_KEY, "preComb").
     | option(RECORDKEY_FIELD_OPT_KEY, "rowId").
     | option(PARTITIONPATH_FIELD_OPT_KEY, "partitionId").
     | option("hoodie.index.type","SIMPLE").
     | option(TABLE_NAME, tableName).
     | mode(Append).
     | save(basePath)
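
(Note: dfFromData8 itself is not defined in this gist. Judging from the error below, it was presumably built without the toBeDeletedStr column that earlier commits wrote to this table. A hypothetical sketch of such a batch, with made-up values, might look like:

scala> // Hypothetical reconstruction of the incoming batch: note that it has no
scala> // toBeDeletedStr column, while the table's existing parquet files do.
scala> val dfFromData8 = Seq(
     |   ("row_1", 1L, "partition_1")
     | ).toDF("rowId", "preComb", "partitionId")

The actual definition may differ; only the missing column matters here.)
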
21/02/27 16:09:10 ERROR BoundedInMemoryExecutor: error producing records
org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
21/02/27 16:09:11 ERROR BoundedInMemoryExecutor: error consuming records
org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
21/02/27 16:09:11 ERROR BaseSparkCommitActionExecutor: Error upserting bucketType UPDATE for partition :0
org.apache.hudi.exception.HoodieException: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:102)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:308)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:299)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:272)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$execute$ecf5068c$1(BaseSparkCommitActionExecutor.java:135)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1(JavaRDDLike.scala:102)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1$adapted(JavaRDDLike.scala:102)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2(RDD.scala:889)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2$adapted(RDD.scala:889)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.$anonfun$getOrCompute$1(RDD.scala:362)
    at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1388)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:446)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:449)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:143)
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:100)
    ... 31 more
Caused by: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:141)
    ... 32 more
Caused by: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    ... 3 more
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
21/02/27 16:09:11 WARN BlockManager: Putting block rdd_373_0 failed due to exception org.apache.hudi.exception.HoodieUpsertException: Error upserting bucketType UPDATE for partition :0.
21/02/27 16:09:11 WARN BlockManager: Block rdd_373_0 could not be removed as it was not found on disk or in memory
21/02/27 16:09:11 ERROR Executor: Exception in task 0.0 in stage 190.0 (TID 1742)
org.apache.hudi.exception.HoodieUpsertException: Error upserting bucketType UPDATE for partition :0
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:279)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$execute$ecf5068c$1(BaseSparkCommitActionExecutor.java:135)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1(JavaRDDLike.scala:102)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1$adapted(JavaRDDLike.scala:102)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2(RDD.scala:889)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2$adapted(RDD.scala:889)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.$anonfun$getOrCompute$1(RDD.scala:362)
    at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1388)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:446)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:449)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.hudi.exception.HoodieException: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:102)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:308)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:299)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:272)
    ... 28 more
Caused by: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:143)
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:100)
    ... 31 more
Caused by: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:141)
    ... 32 more
Caused by: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    ... 3 more
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
21/02/27 16:09:11 WARN TaskSetManager: Lost task 0.0 in stage 190.0 (TID 1742, sivabala-c02xg219jgh6.attlocal.net, executor driver): org.apache.hudi.exception.HoodieUpsertException: Error upserting bucketType UPDATE for partition :0
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:279)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$execute$ecf5068c$1(BaseSparkCommitActionExecutor.java:135)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1(JavaRDDLike.scala:102)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1$adapted(JavaRDDLike.scala:102)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2(RDD.scala:889)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2$adapted(RDD.scala:889)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.$anonfun$getOrCompute$1(RDD.scala:362)
    at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1388)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:446)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:449)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.hudi.exception.HoodieException: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:102)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:308)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:299)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:272)
    ... 28 more
Caused by: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:143)
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:100)
    ... 31 more
Caused by: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:141)
    ... 32 more
Caused by: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    ... 3 more
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
21/02/27 16:09:11 ERROR TaskSetManager: Task 0 in stage 190.0 failed 1 times; aborting job
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 190.0 failed 1 times, most recent failure: Lost task 0.0 in stage 190.0 (TID 1742, sivabala-c02xg219jgh6.attlocal.net, executor driver): org.apache.hudi.exception.HoodieUpsertException: Error upserting bucketType UPDATE for partition :0
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:279)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$execute$ecf5068c$1(BaseSparkCommitActionExecutor.java:135)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1(JavaRDDLike.scala:102)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1$adapted(JavaRDDLike.scala:102)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2(RDD.scala:889)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2$adapted(RDD.scala:889)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.$anonfun$getOrCompute$1(RDD.scala:362)
    at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1388)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:446)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:449)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.hudi.exception.HoodieException: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:102)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:308)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:299)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:272)
    ... 28 more
Caused by: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:143)
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:100)
    ... 31 more
Caused by: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:141)
    ... 32 more
Caused by: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    ... 3 more
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2059)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2008)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2007)
    at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
    at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2007)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:973)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:973)
    at scala.Option.foreach(Option.scala:407)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:973)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2239)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2188)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2177)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:775)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2099)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2120)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2139)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2164)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1227)
    at org.apache.hudi.HoodieSparkSqlWriter$.commitAndPerformPostOperations(HoodieSparkSqlWriter.scala:433)
    at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:218)
    at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:134)
    at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
    at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:90)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
    at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:122)
    at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:121)
    at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:963)
    at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
    at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
    at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
    at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:963)
    at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:415)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:399)
    at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:288)
    ... 69 elided
Caused by: org.apache.hudi.exception.HoodieUpsertException: Error upserting bucketType UPDATE for partition :0
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:279)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$execute$ecf5068c$1(BaseSparkCommitActionExecutor.java:135)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1(JavaRDDLike.scala:102)
    at org.apache.spark.api.java.JavaRDDLike.$anonfun$mapPartitionsWithIndex$1$adapted(JavaRDDLike.scala:102)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2(RDD.scala:889)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndex$2$adapted(RDD.scala:889)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.$anonfun$getOrCompute$1(RDD.scala:362)
    at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1388)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:446)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:449)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: org.apache.hudi.exception.HoodieException: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:102)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:308)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:299)
    at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:272)
    ... 28 more
Caused by: org.apache.hudi.exception.HoodieException: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:143)
    at org.apache.hudi.table.action.commit.SparkMergeHelper.runMerge(SparkMergeHelper.java:100)
    ... 31 more
Caused by: java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieException: operation has failed
    at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.execute(BoundedInMemoryExecutor.java:141)
    ... 32 more
Caused by: org.apache.hudi.exception.HoodieException: operation has failed
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.throwExceptionIfFailed(BoundedInMemoryQueue.java:247)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.readNextRecord(BoundedInMemoryQueue.java:226)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue.access$100(BoundedInMemoryQueue.java:52)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueue$QueueIterator.hasNext(BoundedInMemoryQueue.java:277)
    at org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer.consume(BoundedInMemoryQueueConsumer.java:36)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$2(BoundedInMemoryExecutor.java:121)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    ... 3 more
Caused by: org.apache.parquet.io.InvalidRecordException: Parquet/Avro schema mismatch: Avro field 'toBeDeletedStr' not found
    at org.apache.parquet.avro.AvroRecordConverter.getAvroField(AvroRecordConverter.java:225)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:130)
    at org.apache.parquet.avro.AvroRecordConverter.<init>(AvroRecordConverter.java:95)
    at org.apache.parquet.avro.AvroRecordMaterializer.<init>(AvroRecordMaterializer.java:33)
    at org.apache.parquet.avro.AvroReadSupport.prepareForRead(AvroReadSupport.java:138)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.initialize(InternalParquetRecordReader.java:183)
    at org.apache.parquet.hadoop.ParquetReader.initReader(ParquetReader.java:156)
    at org.apache.parquet.hadoop.ParquetReader.read(ParquetReader.java:135)
    at org.apache.hudi.common.util.ParquetReaderIterator.hasNext(ParquetReaderIterator.java:49)
    at org.apache.hudi.common.util.queue.IteratorBasedQueueProducer.produce(IteratorBasedQueueProducer.java:45)
    at org.apache.hudi.common.util.queue.BoundedInMemoryExecutor.lambda$null$0(BoundedInMemoryExecutor.java:92)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    ... 4 more
Do you know how to solve it?
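
The root cause here is schema evolution rather than indexing: during the merge phase of the upsert, Hudi re-reads the existing parquet file using the incoming batch's Avro schema, and that schema no longer contains the toBeDeletedStr field the older file was written with, hence the "Parquet/Avro schema mismatch" thrown from AvroReadSupport.prepareForRead. A common workaround is to keep the column in the batch and write nulls instead of dropping it entirely. A sketch, assuming toBeDeletedStr was a string column in the earlier commits:

scala> import org.apache.spark.sql.functions.lit
scala> // Carry the dropped field forward as nulls so the writer schema stays a
scala> // superset of the schema in the table's existing parquet files.
scala> val dfWithFullSchema = dfFromData8.withColumn("toBeDeletedStr", lit(null).cast("string"))
scala> dfWithFullSchema.write.format("hudi").
     | options(getQuickstartWriteConfigs).
     | option(PRECOMBINE_FIELD_OPT_KEY, "preComb").
     | option(RECORDKEY_FIELD_OPT_KEY, "rowId").
     | option(PARTITIONPATH_FIELD_OPT_KEY, "partitionId").
     | option("hoodie.index.type","SIMPLE").
     | option(TABLE_NAME, tableName).
     | mode(Append).
     | save(basePath)

This avoids physically removing a column from the write schema, which Hudi of this vintage does not support for existing file groups.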