@Habitats, created April 27, 2016 22:10

DL4J-on-Spark training log: the executor tasks in stage 6.0 all fail with java.lang.IllegalStateException: "Unable to get number of of columns for a non 2d matrix", thrown from DataSet.merge inside IterativeReduceFlatMap, and Spark then aborts the job.
java.lang.IllegalStateException: Unable to get number of of columns for a non 2d matrix
at org.nd4j.linalg.api.ndarray.BaseNDArray.columns(BaseNDArray.java:3443) ~[nd4j-api-0.4-rc3.9-SNAPSHOT.jar:na]
at org.nd4j.linalg.dataset.DataSet.merge(DataSet.java:117) ~[nd4j-api-0.4-rc3.9-SNAPSHOT.jar:na]
at org.deeplearning4j.spark.impl.multilayer.IterativeReduceFlatMap.call(IterativeReduceFlatMap.java:85) ~[dl4j-spark-0.4-rc3.9-SNAPSHOT.jar:na]
at org.deeplearning4j.spark.impl.multilayer.IterativeReduceFlatMap.call(IterativeReduceFlatMap.java:49) ~[dl4j-spark-0.4-rc3.9-SNAPSHOT.jar:na]
at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$5$1.apply(JavaRDDLike.scala:167) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$5$1.apply(JavaRDDLike.scala:167) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:710) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:710) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:69) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD.iterator(RDD.scala:262) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.rdd.RDD.iterator(RDD.scala:264) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.scheduler.Task.run(Task.scala:88) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214) ~[spark-core_2.10-1.5.2.jar:1.5.2]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_60]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_60]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_60]
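The top two frames pinpoint the failure: DataSet.merge (DataSet.java:117) calls BaseNDArray.columns(), and columns() throws whenever an array's rank is not exactly 2, for example the rank-3 [miniBatch, nIn, timeSteps] arrays a time-series pipeline produces. A minimal sketch of the same failure outside Spark, against the nd4j 0.4-rc API, with shapes invented purely for illustration:

    import java.util.Arrays
    import org.nd4j.linalg.dataset.DataSet
    import org.nd4j.linalg.factory.Nd4j

    object MergeRepro extends App {
      // Rank-3 features and labels, e.g. [miniBatch, nIn, timeSteps].
      val features = Nd4j.rand(Array(1, 4, 5))
      val labels   = Nd4j.rand(Array(1, 2, 5))
      println(features.rank())               // 3: not a 2d matrix

      // merge() sizes the merged matrix via columns(), which only accepts
      // rank-2 arrays, so merging these DataSets fails exactly as in the log:
      val ds = new DataSet(features, labels)
      DataSet.merge(Arrays.asList(ds, ds))   // IllegalStateException
    }
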
00:05:57.000 ERROR - Exception in task 0.0 in stage 6.0 (TID 173)
00:05:57.000 ERROR - Exception in task 6.0 in stage 6.0 (TID 179)
00:05:57.000 ERROR - Exception in task 5.0 in stage 6.0 (TID 178)
00:05:57.001 ERROR - Exception in task 4.0 in stage 6.0 (TID 177)
00:05:57.002 ERROR - Exception in task 7.0 in stage 6.0 (TID 180)
00:05:57.002 ERROR - Exception in task 3.0 in stage 6.0 (TID 176)
00:05:57.004 ERROR - Exception in task 1.0 in stage 6.0 (TID 174)
[each ERROR entry above was followed by the same IllegalStateException and 21-frame stack trace shown at the top; the duplicates are elided]
00:05:57.012 WARN - Lost task 3.0 in stage 6.0 (TID 176, localhost): java.lang.IllegalStateException: Unable to get number of of columns for a non 2d matrix
[the 21 frames that followed are identical to the trace above, minus the jar annotations; elided]
00:05:57.014 ERROR - Task 3 in stage 6.0 failed 1 times; aborting job
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 3 in stage 6.0 failed 1 times, most recent failure: Lost task 3.0 in stage 6.0 (TID 176, localhost): java.lang.IllegalStateException: Unable to get number of of columns for a non 2d matrix
at org.nd4j.linalg.api.ndarray.BaseNDArray.columns(BaseNDArray.java:3443)
at org.nd4j.linalg.dataset.DataSet.merge(DataSet.java:117)
at org.deeplearning4j.spark.impl.multilayer.IterativeReduceFlatMap.call(IterativeReduceFlatMap.java:85)
at org.deeplearning4j.spark.impl.multilayer.IterativeReduceFlatMap.call(IterativeReduceFlatMap.java:49)
at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$5$1.apply(JavaRDDLike.scala:167)
at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$5$1.apply(JavaRDDLike.scala:167)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:710)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:710)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300)
at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:69)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:262)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
at org.apache.spark.scheduler.Task.run(Task.scala:88)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1283)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1271)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1270)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1270)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:697)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1496)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1458)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1447)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:567)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1824)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1837)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1850)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1921)
at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:894)
at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:892)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:310)
at org.apache.spark.rdd.RDD.foreach(RDD.scala:892)
at org.apache.spark.api.java.JavaRDDLike$class.foreach(JavaRDDLike.scala:330)
at org.apache.spark.api.java.AbstractJavaRDDLike.foreach(JavaRDDLike.scala:47)
at org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer.runIteration(SparkDl4jMultiLayer.java:430)
at org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer.fitDataSet(SparkDl4jMultiLayer.java:350)
at org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer.fitDataSet(SparkDl4jMultiLayer.java:316)
at org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer.fitDataSet(SparkDl4jMultiLayer.java:282)
at no.habitats.corpus.dl4j.FreebaseW2V$$anonfun$trainSparkFFN$1.apply$mcVI$sp(FreebaseW2V.scala:68)
at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:141)
at no.habitats.corpus.dl4j.FreebaseW2V$.trainSparkFFN(FreebaseW2V.scala:67)
at no.habitats.corpus.spark.SparkUtil$.trainFFNSpark(SparkUtil.scala:96)
at no.habitats.corpus.spark.SparkUtil$.main(SparkUtil.scala:69)
at no.habitats.corpus.spark.SparkUtil.main(SparkUtil.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:144)
Caused by: java.lang.IllegalStateException: Unable to get number of of columns for a non 2d matrix
[the 21 frames that followed are identical to the trace above; elided]
Process finished with exit code 1
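The driver stacktrace shows how the merge is reached: SparkUtil.main → FreebaseW2V.trainSparkFFN → SparkDl4jMultiLayer.fitDataSet → runIteration, which runs IterativeReduceFlatMap on the executors, where the per-partition DataSets get merged. For a feed-forward net the straightforward remedy is to make sure every DataSet in the RDD carries rank-2 [examples, features] matrices before calling fitDataSet. A hedged sketch; to2d, trainingData and sparkNet are hypothetical names, not identifiers from FreebaseW2V:

    import org.apache.spark.rdd.RDD
    import org.nd4j.linalg.api.ndarray.INDArray
    import org.nd4j.linalg.dataset.DataSet

    // Flatten any higher-rank array into [examples, flattenedLength]. Only
    // appropriate for a feed-forward net; a recurrent net needs the 3-D shape,
    // and then the trainer, not the data, is what has to change.
    def to2d(ds: DataSet): DataSet = {
      def flatten(a: INDArray): INDArray =
        if (a.rank() == 2) a
        else a.reshape(a.size(0), a.length() / a.size(0))
      new DataSet(flatten(ds.getFeatureMatrix), flatten(ds.getLabels))
    }

    // val trainingData: RDD[DataSet] = ...      // however trainSparkFFN builds it
    // val sparkNet: SparkDl4jMultiLayer = ...   // as in the trace
    // sparkNet.fitDataSet(trainingData.map(to2d))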