public
Created

Apache PIG - java.lang.UnsupportedOperationException: getJobTrackerAddrs is not supported

  • Download Gist
gistfile1.txt
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
org.apache.pig.impl.logicalLayer.FrontendException: ERROR 1002: Unable to store alias XYZ_active_users
at org.apache.pig.PigServer$Graph.registerQuery(PigServer.java:1552)
at org.apache.pig.PigServer.registerQuery(PigServer.java:540)
at org.apache.pig.tools.grunt.GruntParser.processPig(GruntParser.java:970)
at org.apache.pig.tools.pigscript.parser.PigScriptParser.parse(PigScriptParser.java:386)
at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:189)
at org.apache.pig.PigServer.registerScript(PigServer.java:614)
at org.apache.pig.PigServer.registerScript(PigServer.java:716)
at org.apache.pig.PigServer.registerScript(PigServer.java:689)
at com.bsb.hike.analytics.pig.BaseHikePigTask.run(BaseHikePigTask.java:148)
at com.bsb.hike.analytics.pig.HikePigTaskManager.main(HikePigTaskManager.java:553)
Caused by: org.apache.pig.backend.executionengine.ExecException: ERROR 2117: Unexpected error when launching map reduce job.
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.launchPig(MapReduceLauncher.java:322)
at org.apache.pig.PigServer.launchPlan(PigServer.java:1270)
at org.apache.pig.PigServer.executeCompiledLogicalPlan(PigServer.java:1255)
at org.apache.pig.PigServer.execute(PigServer.java:1245)
at org.apache.pig.PigServer.access$400(PigServer.java:127)
at org.apache.pig.PigServer$Graph.registerQuery(PigServer.java:1547)
... 9 more
Caused by: java.lang.RuntimeException: Could not resolve error that occured when launching map reduce job: java.lang.UnsupportedOperationException: getJobTrackerAddrs is not supported
at org.apache.hadoop.fs.FileSystem.getJobTrackerAddrs(FileSystem.java:1796)
at org.apache.hadoop.ipc.RPC$FailoverInvoker.invoke(RPC.java:525)
at org.apache.hadoop.mapred.$Proxy1.getStagingAreaDir(Unknown Source)
at org.apache.hadoop.mapred.JobClient.getStagingAreaDir(JobClient.java:1270)
at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:102)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:875)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:869)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1126)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:869)
at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:843)
at org.apache.hadoop.mapred.jobcontrol.Job.submit(Job.java:378)
at org.apache.hadoop.mapred.jobcontrol.JobControl.startReadyJobs(JobControl.java:247)
at org.apache.hadoop.mapred.jobcontrol.JobControl.run(JobControl.java:279)
at java.lang.Thread.run(Thread.java:722)
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher$1.run(MapReduceLauncher.java:260)
 
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher$JobControlThreadExceptionHandler.uncaughtException(MapReduceLauncher.java:631)
at java.lang.Thread.dispatchUncaughtException(Thread.java:1964)

Additionally, the following environment variables are already set in the shell from which I am running the job, but I am still getting the error:

root@hadoop2:/home/hduser/analytics_apache/analytics# echo $HADOOP_HOME $HADOOP_CONF_DIR $PIG_HOME $PIG_CLASSPATH
/usr/local/hadoop-1.1.0 /usr/local/hadoop-1.1.0/conf /usr/local/pig-0.10.0 /usr/local/pig-0.10.0/pig-0.10.0.jar
root@hadoop2:/home/hduser/analytics_apache/analytics#

The MapReduce job waits at 0% complete before throwing the above exception:

 [java] Thu Nov 22 07:28:10 2012 org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler INFO BytesPerReducer=1000000000 maxReducers=999 totalInputFileSize=0
 [java] Thu Nov 22 07:28:10 2012 org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler INFO Neither PARALLEL nor default parallelism is set for this job. Setting number of reducers to 1
 [java] Thu Nov 22 07:28:10 2012 org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher INFO 1 map-reduce job(s) waiting for submission.
 [java] Thu Nov 22 07:28:11 2012 org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher INFO 0% complete

Even when executing a basic Pig script (shown below) using PigServer in Java, the following exception is thrown:

REGISTER /usr/local/hadoop-1.1.0/lib/hadoop-lzo-0.4.15.jar;
basicLoad = LOAD '/data/analytics/messages';
dump basicLoad;
org.apache.pig.impl.logicalLayer.FrontendException: ERROR 1066: Unable to open iterator for alias basicLoad
        at org.apache.pig.PigServer.openIterator(PigServer.java:857)
        at org.apache.pig.tools.grunt.GruntParser.processDump(GruntParser.java:682)
        at org.apache.pig.tools.pigscript.parser.PigScriptParser.parse(PigScriptParser.java:303)
        at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:189)
        at org.apache.pig.PigServer.registerScript(PigServer.java:614)
        at org.apache.pig.PigServer.registerScript(PigServer.java:716)
        at org.apache.pig.PigServer.registerScript(PigServer.java:689)
        at com.bsb.hike.analytics.pig.BaseHikePigTask.run(BaseHikePigTask.java:148)
        at com.bsb.hike.analytics.pig.HikePigTaskManager.main(HikePigTaskManager.java:564)
Caused by: org.apache.pig.PigException: ERROR 1002: Unable to store alias basicLoad
        at org.apache.pig.PigServer.storeEx(PigServer.java:956)
        at org.apache.pig.PigServer.store(PigServer.java:919)
        at org.apache.pig.PigServer.openIterator(PigServer.java:832)
        ... 8 more
Caused by: org.apache.pig.backend.executionengine.ExecException: ERROR 2117: Unexpected error when launching map reduce job.
        at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.launchPig(MapReduceLauncher.java:322)
        at org.apache.pig.PigServer.launchPlan(PigServer.java:1270)
        at org.apache.pig.PigServer.executeCompiledLogicalPlan(PigServer.java:1255)
        at org.apache.pig.PigServer.storeEx(PigServer.java:952)
        ... 10 more

Exception Continued:

Caused by: java.lang.RuntimeException: Could not resolve error that occured when launching map reduce job: java.lang.UnsupportedOperationException: getJobTrackerAddrs is not supported
        at org.apache.hadoop.fs.FileSystem.getJobTrackerAddrs(FileSystem.java:1796)
        at org.apache.hadoop.ipc.RPC$FailoverInvoker.invoke(RPC.java:525)
        at org.apache.hadoop.mapred.$Proxy1.getStagingAreaDir(Unknown Source)
        at org.apache.hadoop.mapred.JobClient.getStagingAreaDir(JobClient.java:1270)
        at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:102)
        at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:875)
        at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:869)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1126)
        at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:869)
        at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:843)
        at org.apache.hadoop.mapred.jobcontrol.Job.submit(Job.java:378)
        at org.apache.hadoop.mapred.jobcontrol.JobControl.startReadyJobs(JobControl.java:247)
        at org.apache.hadoop.mapred.jobcontrol.JobControl.run(JobControl.java:279)
        at java.lang.Thread.run(Thread.java:722)
        at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher$1.run(MapReduceLauncher.java:260)

        at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher$JobControlThreadExceptionHandler.uncaughtException(MapReduceLauncher.java:631)
        at java.lang.Thread.dispatchUncaughtException(Thread.java:1964)

Please sign in to comment on this gist.

Something went wrong with that request. Please try again.