-
-
Save msukmanowsky/ede8c74577004f744b58 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/*
 * Join raw event data against a small key -> timezone lookup table using a
 * fragment-replicated (map-side) join, then keep only the rows that matched.
 *
 * Parameters:
 *   $timezoneFile - lookup file loaded as (key:chararray, timezone:chararray)
 *   $input        - Ctrl-A (\u0001) delimited event data; column $0 is the join key
 */
timezones = LOAD '$timezoneFile' AS (key:chararray, timezone:chararray);
data = LOAD '$input' USING PigStorage('\u0001');

-- 'replicated' asks Pig to load the (presumably small) timezones relation into
-- memory and perform the join map-side, avoiding a reduce phase.
data = JOIN data BY $0 LEFT, timezones BY key USING 'replicated';

-- LEFT OUTER join + IS NOT NULL filter keeps only rows whose key had a match
-- (i.e. an inner-join result, but expressed so unmatched rows are observable first).
data = FILTER data BY timezones::timezone IS NOT NULL;

/* If I do a DUMP data, script works just fine here. If I try to ILLUSTRATE joined I get the error below */
-- DUMP data;
-- NOTE(review): the log below shows ILLUSTRATE failing with ERROR 2087
-- ("Found index:0 in multiple LocalRearrange operators") on Pig 0.11.2-SNAPSHOT;
-- this looks like ILLUSTRATE's ExampleGenerator not handling replicated joins —
-- confirm against the Pig JIRA; DUMP/EXPLAIN work as alternatives.
ILLUSTRATE data;
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
2013-08-29 10:15:57,752 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.2-SNAPSHOT (rexported) compiled Aug 27 2013, 16:21:06 | |
2013-08-29 10:15:57,752 [main] INFO org.apache.pig.Main - Logging error messages to: /path/to/log/... | |
2013-08-29 10:15:57.885 java[3289:1703] Unable to load realm info from SCDynamicStore | |
2013-08-29 10:15:57,963 [main] INFO org.apache.pig.impl.util.Utils - Default bootup file /Users/mikesukmanowsky/.pigbootup not found | |
2013-08-29 10:15:58,037 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: file:/// | |
2013-08-29 10:15:58,673 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: file:/// | |
2013-08-29 10:15:58,934 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? false | |
2013-08-29 10:15:58,948 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 2 | |
2013-08-29 10:15:58,949 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 2 | |
2013-08-29 10:15:58,956 [main] INFO org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job | |
2013-08-29 10:15:58,969 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3 | |
2013-08-29 10:15:58,973 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Using reducer estimator: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator | |
2013-08-29 10:15:58,974 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator - BytesPerReducer=1000000000 maxReducers=999 totalInputFileSize=39874 | |
2013-08-29 10:15:58,974 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting Parallelism to 1 | |
2013-08-29 10:15:58,995 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Key [pig.schematuple] is false, will not generate code. | |
2013-08-29 10:15:58,995 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Starting process to move generated code to distributed cacche | |
2013-08-29 10:15:58,995 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Distributed cache not supported or needed in local mode. Setting key [pig.schematuple.local.dir] with code temp directory: /var/folders/z1/6bd5rfw52dv82_0_p1yw6w500000gn/T/1377785758995-0 | |
2013-08-29 10:15:59,013 [main] INFO org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job | |
2013-08-29 10:15:59,013 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3 | |
2013-08-29 10:15:59,013 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Using reducer estimator: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator | |
2013-08-29 10:15:59,014 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator - BytesPerReducer=1000000000 maxReducers=999 totalInputFileSize=181589210 | |
2013-08-29 10:15:59,014 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting Parallelism to 1 | |
2013-08-29 10:15:59,021 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Key [pig.schematuple] is false, will not generate code. | |
2013-08-29 10:15:59,022 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Starting process to move generated code to distributed cacche | |
2013-08-29 10:15:59,022 [main] INFO org.apache.pig.data.SchemaTupleFrontend - Distributed cache not supported or needed in local mode. Setting key [pig.schematuple.local.dir] with code temp directory: /var/folders/z1/6bd5rfw52dv82_0_p1yw6w500000gn/T/1377785759021-0 | |
2013-08-29 10:15:59,059 [main] INFO org.apache.pig.data.SchemaTupleBackend - Key [pig.schematuple] was not set... will not generate code. | |
2013-08-29 10:15:59,064 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigMapOnly$Map - Aliases being processed per job phase (AliasName[line,offset]): M: processed_logs[38,17] C: R: | |
2013-08-29 10:15:59,069 [main] INFO org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1 | |
2013-08-29 10:15:59,069 [main] INFO org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1 | |
2013-08-29 10:15:59,080 [main] WARN org.apache.hadoop.util.NativeCodeLoader - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable | |
2013-08-29 10:15:59,262 [main] WARN org.apache.pig.data.SchemaTupleBackend - SchemaTupleBackend has already been initialized | |
2013-08-29 10:15:59,262 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigMapOnly$Map - Aliases being processed per job phase (AliasName[line,offset]): M: timezones[37,12] C: R: | |
2013-08-29 10:15:59,263 [main] INFO org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1 | |
2013-08-29 10:15:59,263 [main] INFO org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1 | |
2013-08-29 10:15:59,318 [main] INFO org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? false | |
org.apache.pig.impl.plan.optimizer.OptimizerException: ERROR 2087: Unexpected problem during optimization. Found index:0 in multiple LocalRearrange operators. | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.POPackageAnnotator$LoRearrangeDiscoverer.visitLocalRearrange(POPackageAnnotator.java:224) | |
at org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.visit(POLocalRearrange.java:157) | |
at org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange.visit(POLocalRearrange.java:52) | |
at org.apache.pig.impl.plan.DepthFirstWalker.depthFirst(DepthFirstWalker.java:69) | |
at org.apache.pig.impl.plan.DepthFirstWalker.depthFirst(DepthFirstWalker.java:71) | |
at org.apache.pig.impl.plan.DepthFirstWalker.depthFirst(DepthFirstWalker.java:71) | |
at org.apache.pig.impl.plan.DepthFirstWalker.depthFirst(DepthFirstWalker.java:71) | |
at org.apache.pig.impl.plan.DepthFirstWalker.walk(DepthFirstWalker.java:52) | |
at org.apache.pig.impl.plan.PlanVisitor.visit(PlanVisitor.java:46) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.POPackageAnnotator.patchPackage(POPackageAnnotator.java:125) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.POPackageAnnotator.handlePackage(POPackageAnnotator.java:101) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.POPackageAnnotator.visitMROp(POPackageAnnotator.java:89) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceOper.visit(MapReduceOper.java:273) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceOper.visit(MapReduceOper.java:46) | |
at org.apache.pig.impl.plan.DepthFirstWalker.depthFirst(DepthFirstWalker.java:69) | |
at org.apache.pig.impl.plan.DepthFirstWalker.walk(DepthFirstWalker.java:52) | |
at org.apache.pig.impl.plan.PlanVisitor.visit(PlanVisitor.java:46) | |
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.compile(MapReduceLauncher.java:592) | |
at org.apache.pig.pen.LocalMapReduceSimulator.launchPig(LocalMapReduceSimulator.java:90) | |
at org.apache.pig.pen.ExampleGenerator.getData(ExampleGenerator.java:257) | |
at org.apache.pig.pen.ExampleGenerator.getData(ExampleGenerator.java:238) | |
at org.apache.pig.pen.LineageTrimmingVisitor.init(LineageTrimmingVisitor.java:103) | |
at org.apache.pig.pen.LineageTrimmingVisitor.<init>(LineageTrimmingVisitor.java:98) | |
at org.apache.pig.pen.ExampleGenerator.getExamples(ExampleGenerator.java:166) | |
at org.apache.pig.PigServer.getExamples(PigServer.java:1180) | |
at org.apache.pig.tools.grunt.GruntParser.processIllustrate(GruntParser.java:739) | |
at org.apache.pig.tools.pigscript.parser.PigScriptParser.Illustrate(PigScriptParser.java:626) | |
at org.apache.pig.tools.pigscript.parser.PigScriptParser.parse(PigScriptParser.java:323) | |
at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:194) | |
at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:170) | |
at org.apache.pig.tools.grunt.Grunt.exec(Grunt.java:84) | |
at org.apache.pig.Main.run(Main.java:604) | |
at org.apache.pig.Main.main(Main.java:157) | |
2013-08-29 10:15:59,327 [main] ERROR org.apache.pig.tools.grunt.Grunt - ERROR 2087: Unexpected problem during optimization. Found index:0 in multiple LocalRearrange operators. |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment