Created January 30, 2020 14:29
Exception in thread "main" org.apache.spark.sql.AnalysisException: USING column `index` cannot be resolved on the right side of the join. The right-side columns: [];
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$99$$anonfun$apply$74.apply(Analyzer.scala:2341)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$99$$anonfun$apply$74.apply(Analyzer.scala:2341)
rf085reader_1 | at scala.Option.getOrElse(Option.scala:121)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$99.apply(Analyzer.scala:2340)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$99.apply(Analyzer.scala:2339)
rf085reader_1 | at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
rf085reader_1 | at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
rf085reader_1 | at scala.collection.immutable.List.foreach(List.scala:392)
rf085reader_1 | at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
rf085reader_1 | at scala.collection.immutable.List.map(List.scala:296)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$commonNaturalJoinProcessing(Analyzer.scala:2339)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin$$anonfun$apply$34.applyOrElse(Analyzer.scala:2223)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin$$anonfun$apply$34.applyOrElse(Analyzer.scala:2220)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
rf085reader_1 | at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:89)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:86)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsUp(AnalysisHelper.scala:86)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin$.apply(Analyzer.scala:2220)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin$.apply(Analyzer.scala:2219)
rf085reader_1 | at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
rf085reader_1 | at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
rf085reader_1 | at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
rf085reader_1 | at scala.collection.immutable.List.foldLeft(List.scala:84)
rf085reader_1 | at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
rf085reader_1 | at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
rf085reader_1 | at scala.collection.immutable.List.foreach(List.scala:392)
rf085reader_1 | at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:127)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:121)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:106)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:105)
rf085reader_1 | at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
rf085reader_1 | at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:105)
rf085reader_1 | at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:57)
rf085reader_1 | at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:55)
rf085reader_1 | at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:47)
rf085reader_1 | at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:78)
rf085reader_1 | at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withPlan(Dataset.scala:3412)
rf085reader_1 | at org.apache.spark.sql.Dataset.join(Dataset.scala:944)
rf085reader_1 | at org.apache.spark.sql.Dataset.join(Dataset.scala:913)
rf085reader_1 | at org.locationtech.rasterframes.datasource.geotrellis.GeoTrellisCatalog$GeoTrellisCatalogRelation.layers$lzycompute(GeoTrellisCatalog.scala:100)
rf085reader_1 | at org.locationtech.rasterframes.datasource.geotrellis.GeoTrellisCatalog$GeoTrellisCatalogRelation.layers(GeoTrellisCatalog.scala:64)
rf085reader_1 | at org.locationtech.rasterframes.datasource.geotrellis.GeoTrellisCatalog$GeoTrellisCatalogRelation.schema(GeoTrellisCatalog.scala:103)
rf085reader_1 | at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:403)
rf085reader_1 | at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:223)
rf085reader_1 | at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:211)
rf085reader_1 | at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:178)
rf085reader_1 | at org.locationtech.rasterframes.datasource.geotrellis.package$DataFrameReaderHasGeotrellisFormat.geotrellisCatalog(package.scala:52)
rf085reader_1 | at io.anagraph.zazjxb4u.RfReader$.main(RfReader.scala:51)
rf085reader_1 | at io.anagraph.zazjxb4u.RfReader.main(RfReader.scala)
rf085reader_1 | at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
rf085reader_1 | at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
rf085reader_1 | at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
rf085reader_1 | at java.lang.reflect.Method.invoke(Method.java:498)
rf085reader_1 | at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:845)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:920)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:929)
rf085reader_1 | at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
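
For context, a minimal sketch of the kind of driver program that would produce this trace, assuming the RasterFrames 0.8.x API in which geotrellisCatalog (the package$DataFrameReaderHasGeotrellisFormat frame above) takes the catalog's base URI. The session setup and catalog URI below are hypothetical; the gist shows only the log. One plausible reading of the error is that the attribute store at the given URI yielded no layers, so the right-hand side of the catalog relation's internal join on the `index` column (GeoTrellisCatalog.scala:100 in the trace) has no columns at all.

import java.net.URI

import org.apache.spark.sql.SparkSession
import org.locationtech.rasterframes._
import org.locationtech.rasterframes.datasource.geotrellis._

object RfReader {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("rf085reader")
      .getOrCreate()
      .withRasterFrames // register RasterFrames types and functions with the session

    // Hypothetical catalog location; substitute the real attribute-store URI.
    val catalogUri = new URI("file:///data/geotrellis-catalog")

    // Fails during analysis (RfReader.scala:51 in the trace) when the
    // catalog relation's layer join cannot resolve the `index` column.
    val catalog = spark.read.geotrellisCatalog(catalogUri)
    catalog.printSchema()
    catalog.show(truncate = false)
  }
}

A reasonable first check is whether the attribute store at that URI actually contains layer metadata: an empty or unreadable catalog would leave the right side of the join with no columns, which is exactly what the AnalysisException reports.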