public
Last active

Problems with hive-server 0.10.0 — configuration dump attached as logs for the problem report.

  • Download Gist
hive-default-1223123...xml
XML
[gist viewer line-number gutter, lines 1–517, omitted — the visible excerpt below covers only the beginning of the 517-line file]
<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
<property><name>hive.ppd.remove.duplicatefilters</name><value>true</value><source>programatically</source></property>
<property><name>hive.stats.jdbc.timeout</name><value>30</value><source>programatically</source></property>
<property><name>hive.skewjoin.mapjoin.map.tasks</name><value>10000</value><source>programatically</source></property>
<property><name>hadoop.bin.path</name><value>/usr/lib/hadoop/bin/hadoop</value><source>programatically</source></property>
<property><name>job.end.retry.interval</name><value>30000</value><source>mapred-default.xml</source></property>
<property><name>io.bytes.per.checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hive.hwi.listen.port</name><value>9999</value><source>programatically</source></property>
<property><name>hive.skewjoin.key</name><value>100000</value><source>programatically</source></property>
<property><name>mapred.job.tracker.retiredjobs.cache.size</name><value>1000</value><source>mapred-default.xml</source></property>
<property><name>mapred.task.profile.reduces</name><value>0-2</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.jobtracker.staging.root.dir</name><value>${hadoop.tmp.dir}/mapred/staging</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.reuse.jvm.num.tasks</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:derby:;databaseName=metastore_db;create=true</value><source>programatically</source></property>
<property><name>fs.AbstractFileSystem.file.impl</name><value>org.apache.hadoop.fs.local.LocalFs</value><source>core-default.xml</source></property>
<property><name>hive.cluster.delegation.token.store.zookeeper.acl</name><value/><source>programatically</source></property>
<property><name>mapred.reduce.tasks.speculative.execution</name><value>true</value><source>programatically</source></property>
<property><name>hive.lockmgr.zookeeper.default.partition.name</name><value>__HIVE_DEFAULT_ZOOKEEPER_PARTITION__</value><source>programatically</source></property>
<property><name>hive.mapred.mode</name><value>nonstrict</value><source>programatically</source></property>
<property><name>hive.exec.rcfile.use.explicit.header</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.ssl.keystores.factory.class</name><value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value><source>core-default.xml</source></property>
<property><name>hive.metastore.ds.retry.attempts</name><value>1</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value><source>core-default.xml</source></property>
<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value><source>core-default.xml</source></property>
<property><name>hive.outerjoin.supports.filters</name><value>true</value><source>programatically</source></property>
<property><name>s3.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hive.exec.job.debug.timeout</name><value>30000</value><source>programatically</source></property>
<property><name>hadoop.relaxed.worker.version.check</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>datanucleus.autoStartMechanismMode</name><value>checked</value><source>programatically</source></property>
<property><name>mapred.task.tracker.http.address</name><value>0.0.0.0:50060</value><source>mapred-default.xml</source></property>
<property><name>io.map.index.interval</name><value>128</value><source>core-default.xml</source></property>
<property><name>s3.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>hive.intermediate.compression.type</name><value/><source>programatically</source></property>
<property><name>hive.enforce.bucketmapjoin</name><value>false</value><source>programatically</source></property>
<property><name>ha.zookeeper.session-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
<property><name>datanucleus.identifierFactory</name><value>datanucleus</value><source>programatically</source></property>
<property><name>mapred.system.dir</name><value>/mapred/system</value><source>mapred-site.xml</source></property>
<property><name>s3.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>hive.querylog.plan.progress.interval</name><value>60000</value><source>programatically</source></property>
<property><name>mapred.task.tracker.report.address</name><value>127.0.0.1:0</value><source>mapred-default.xml</source></property>
<property><name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name><value>0.55</value><source>programatically</source></property>
<property><name>mapred.jobtracker.plugins</name><value>org.apache.hadoop.thriftfs.ThriftJobTrackerPlugin</value><source>mapred-site.xml</source></property>
<property><name>jobtracker.thrift.address</name><value>0.0.0.0:9290</value><source>mapred-site.xml</source></property>
<property><name>mapreduce.reduce.shuffle.connect.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
<property><name>hive.multi.insert.move.tasks.share.dependencies</name><value>false</value><source>programatically</source></property>
<property><name>hadoop.ssl.enabled</name><value>false</value><source>core-default.xml</source></property>
<property><name>hive.metastore.archive.intermediate.archived</name><value>_INTERMEDIATE_ARCHIVED</value><source>programatically</source></property>
<property><name>ipc.client.connect.max.retries.on.timeouts</name><value>45</value><source>core-default.xml</source></property>
<property><name>hive.input.format</name><value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value><source>programatically</source></property>
<property><name>mapred.healthChecker.interval</name><value>60000</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.job.complete.cancel.delegation.tokens</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>hive.index.compact.query.max.entries</name><value>10000000</value><source>programatically</source></property>
<property><name>fs.trash.interval</name><value>1440</value><source>core-site.xml</source></property>
<property><name>hive.metastore.event.clean.freq</name><value>0</value><source>programatically</source></property>
<property><name>hadoop.jetty.logs.serve.aliases</name><value>true</value><source>core-default.xml</source></property>
<property><name>hive.heartbeat.interval</name><value>1000</value><source>programatically</source></property>
<property><name>hive.session.silent</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.server.min.threads</name><value>200</value><source>programatically</source></property>
<property><name>mapred.skip.map.auto.incr.proc.count</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/_HOST@LOCALHOST</value><source>core-default.xml</source></property>
<property><name>mapred.min.split.size.per.rack</name><value>1</value><source>programatically</source></property>
<property><name>hive.cli.print.current.db</name><value>false</value><source>programatically</source></property>
<property><name>hive.skewjoin.mapjoin.min.split</name><value>33554432</value><source>programatically</source></property>
<property><name>s3native.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>mapred.child.tmp</name><value>./tmp</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.driver.run.hooks</name><value/><source>programatically</source></property>
<property><name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name><value>5000</value><source>mapred-default.xml</source></property>
<property><name>hive.ppd.recognizetransivity</name><value>true</value><source>programatically</source></property>
<property><name>hive.merge.current.job.has.dynamic.partitions</name><value>false</value><source>programatically</source></property>
<property><name>hive.script.serde</name><value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value><source>programatically</source></property>
<property><name>hive.client.stats.publishers</name><value/><source>programatically</source></property>
<property><name>hive.stats.collect.tablekeys</name><value>false</value><source>programatically</source></property>
<property><name>hive.hmshandler.retry.interval</name><value>1000</value><source>programatically</source></property>
<property><name>hive.cli.print.header</name><value>false</value><source>programatically</source></property>
<property><name>javax.jdo.PersistenceManagerFactoryClass</name><value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value><source>programatically</source></property>
<property><name>datanucleus.validateConstraints</name><value>false</value><source>programatically</source></property>
<property><name>hive.insert.into.multilevel.dirs</name><value>false</value><source>programatically</source></property>
<property><name>hive.map.aggr.hash.min.reduction</name><value>0.5</value><source>programatically</source></property>
<property><name>io.sort.spill.percent</name><value>0.80</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.shuffle.input.buffer.percent</name><value>0.70</value><source>mapred-default.xml</source></property>
<property><name>hadoop.skip.worker.version.check</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.max.dynamic.partitions</name><value>1000</value><source>programatically</source></property>
<property><name>hive.variable.substitute</name><value>true</value><source>programatically</source></property>
<property><name>hive.limit.optimize.enable</name><value>false</value><source>programatically</source></property>
<property><name>hadoop.security.instrumentation.requires.admin</name><value>false</value><source>core-default.xml</source></property>
<property><name>mapred.skip.map.max.skip.records</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hive.cluster.delegation.token.store.zookeeper.znode</name><value>/hive/cluster/delegation</value><source>programatically</source></property>
<property><name>mapreduce.reduce.shuffle.maxfetchfailures</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.authorization</name><value>false</value><source>core-default.xml</source></property>
<property><name>hive.query.result.fileformat</name><value>TextFile</value><source>programatically</source></property>
<property><name>hive.metastore.batch.retrieve.table.partition.max</name><value>1000</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.filter.group</name><value>(objectClass=group)</value><source>core-default.xml</source></property>
<property><name>hive.zookeeper.session.timeout</name><value>600000</value><source>programatically</source></property>
<property><name>hive.jar.path</name><value/><source>programatically</source></property>
<property><name>hive.merge.mapredfiles</name><value>false</value><source>programatically</source></property>
<property><name>hive.stats.default.aggregator</name><value/><source>programatically</source></property>
<property><name>hive.mapjoin.smalltable.filesize</name><value>25000000</value><source>programatically</source></property>
<property><name>hive.join.emit.interval</name><value>1000</value><source>programatically</source></property>
<property><name>hive.start.cleanup.scratchdir</name><value>false</value><source>programatically</source></property>
<property><name>hive.enforce.bucketing</name><value>false</value><source>programatically</source></property>
<property><name>hive.optimize.ppd.storage</name><value>true</value><source>programatically</source></property>
<property><name>hive.merge.input.format.block.level</name><value>org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat</value><source>programatically</source></property>
<property><name>datanucleus.connectionPoolingType</name><value>DBCP</value><source>programatically</source></property>
<property><name>mapred.task.profile.maps</name><value>0-2</value><source>mapred-default.xml</source></property>
<property><name>hive.index.compact.query.max.size</name><value>10737418240</value><source>programatically</source></property>
<property><name>hive.optimize.index.filter.compact.minsize</name><value>5368709120</value><source>programatically</source></property>
<property><name>mapred.local.dir</name><value>/hadoop/mapred</value><source>mapred-site.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.attr.group.name</name><value>cn</value><source>core-default.xml</source></property>
<property><name>mapred.merge.recordsBeforeProgress</name><value>10000</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.tracker.http.address</name><value>0.0.0.0:50030</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authorization.manager</name><value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value><source>programatically</source></property>
<property><name>mapred.compress.map.output</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapred.userlog.retain.hours</name><value>24</value><source>mapred-default.xml</source></property>
<property><name>s3native.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hive.error.on.empty.partition</name><value>false</value><source>programatically</source></property>
<property><name>tfile.fs.output.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
<property><name>hive.mapper.cannot.span.multiple.partitions</name><value>false</value><source>programatically</source></property>
<property><name>hive.transform.escape.input</name><value>false</value><source>programatically</source></property>
<property><name>mapred.tasktracker.reduce.tasks.maximum</name><value>8</value><source>mapred-site.xml</source></property>
<property><name>fs.AbstractFileSystem.hdfs.impl</name><value>org.apache.hadoop.fs.Hdfs</value><source>core-default.xml</source></property>
<property><name>javax.jdo.option.ConnectionDriverName</name><value>org.apache.derby.jdbc.EmbeddedDriver</value><source>programatically</source></property>
<property><name>hive.optimize.skewjoin</name><value>false</value><source>programatically</source></property>
<property><name>javax.jdo.option.DetachAllOnCommit</name><value>true</value><source>programatically</source></property>
<property><name>mapred.disk.healthChecker.interval</name><value>60000</value><source>mapred-default.xml</source></property>
<property><name>mapred.cluster.map.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>hadoop.ssl.client.conf</name><value>ssl-client.xml</value><source>core-default.xml</source></property>
<property><name>hive.query.planid</name><value/><source>programatically</source></property>
<property><name>hive.optimize.union.remove</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.ds.retry.interval</name><value>1000</value><source>programatically</source></property>
<property><name>hive.metastore.thrift.framed.transport.enabled</name><value>false</value><source>programatically</source></property>
<property><name>hive.hmshandler.force.reload.conf</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.show.job.failure.debug.info</name><value>true</value><source>programatically</source></property>
<property><name>hive.script.recordwriter</name><value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value><source>programatically</source></property>
<property><name>hive.intermediate.compression.codec</name><value/><source>programatically</source></property>
<property><name>hive.merge.size.per.task</name><value>256000000</value><source>programatically</source></property>
<property><name>hive.added.archives.path</name><value/><source>programatically</source></property>
<property><name>hive.stats.reliable</name><value>false</value><source>programatically</source></property>
<property><name>javax.jdo.option.ConnectionUserName</name><value>APP</value><source>programatically</source></property>
<property><name>hive.join.cache.size</name><value>25000</value><source>programatically</source></property>
<property><name>mapreduce.tasktracker.outofband.heartbeat</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>datanucleus.transactionIsolation</name><value>read-committed</value><source>programatically</source></property>
<property><name>io.native.lib.available</name><value>true</value><source>core-default.xml</source></property>
<property><name>hive.limit.optimize.fetch.max</name><value>50000</value><source>programatically</source></property>
<property><name>mapred.jobtracker.restart.recover</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapred.reduce.child.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.mode.local.auto.inputbytes.max</name><value>134217728</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.ssl.address</name><value>0.0.0.0</value><source>mapred-default.xml</source></property>
<property><name>hive.cluster.delegation.token.store.zookeeper.connectString</name><value/><source>programatically</source></property>
<property><name>hive.metastore.batch.retrieve.max</name><value>300</value><source>programatically</source></property>
<property><name>hive.mapred.reduce.tasks.speculative.execution</name><value>true</value><source>programatically</source></property>
<property><name>hive.optimize.skewjoin.compiletime</name><value>false</value><source>programatically</source></property>
<property><name>hive.binary.record.max.length</name><value>1000</value><source>programatically</source></property>
<property><name>mapred.inmem.merge.threshold</name><value>1000</value><source>mapred-default.xml</source></property>
<property><name>hive.mapjoin.size.key</name><value>10000</value><source>programatically</source></property>
<property><name>ipc.client.connection.maxidletime</name><value>10000</value><source>core-default.xml</source></property>
<property><name>mapred.max.split.size</name><value>256000000</value><source>programatically</source></property>
<property><name>hive.metastore.warehouse.dir</name><value>/user/hive/warehouse</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.ssl.enabled</name><value>${hadoop.ssl.enabled}</value><source>mapred-default.xml</source></property>
<property><name>hive.io.exception.handlers</name><value/><source>programatically</source></property>
<property><name>hive.zookeeper.namespace</name><value>hive_zookeeper_namespace</value><source>programatically</source></property>
<property><name>hive.exec.parallel.thread.number</name><value>8</value><source>programatically</source></property>
<property><name>hive.map.aggr.hash.force.flush.memory.threshold</name><value>0.9</value><source>programatically</source></property>
<property><name>fs.s3.sleepTimeSeconds</name><value>10</value><source>core-default.xml</source></property>
<property><name>hive.added.files.path</name><value/><source>programatically</source></property>
<property><name>hadoop.ssl.server.conf</name><value>ssl-server.xml</value><source>core-default.xml</source></property>
<property><name>hive.optimize.metadataonly</name><value>true</value><source>programatically</source></property>
<property><name>mapred.reduce.tasks</name><value>-1</value><source>programatically</source></property>
<property><name>ha.zookeeper.parent-znode</name><value>/hadoop-ha</value><source>core-default.xml</source></property>
<property><name>mapred.queue.names</name><value>default</value><source>mapred-default.xml</source></property>
<property><name>io.seqfile.lazydecompress</name><value>true</value><source>core-default.xml</source></property>
<property><name>hive.exec.drop.ignorenonexistent</name><value>true</value><source>programatically</source></property>
<property><name>hive.test.mode.nosamplelist</name><value/><source>programatically</source></property>
<property><name>hive.test.mode.prefix</name><value>test_</value><source>programatically</source></property>
<property><name>hive.udtf.auto.progress</name><value>false</value><source>programatically</source></property>
<property><name>ipc.client.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
<property><name>hive.optimize.reducededuplication</name><value>true</value><source>programatically</source></property>
<property><name>hive.query.id</name><value/><source>programatically</source></property>
<property><name>mapred.acls.enabled</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>s3.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>mapred.tasktracker.dns.nameserver</name><value>default</value><source>mapred-default.xml</source></property>
<property><name>hive.mapjoin.localtask.max.memory.usage</name><value>0.9</value><source>programatically</source></property>
<property><name>mapred.submit.replication</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>hive.limit.row.max.size</name><value>100000</value><source>programatically</source></property>
<property><name>hive.querylog.location</name><value>/tmp/hive</value><source>programatically</source></property>
<property><name>mapreduce.task.io.sort.mb</name><value>100</value><source>mapred-site.xml</source></property>
<property><name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name><value>0.3</value><source>programatically</source></property>
<property><name>hive.multigroupby.singlereducer</name><value>true</value><source>programatically</source></property>
<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec</value><source>core-site.xml</source></property>
<property><name>hive.optimize.index.filter</name><value>false</value><source>programatically</source></property>
<property><name>hive.optimize.listbucketing</name><value>false</value><source>programatically</source></property>
<property><name>io.file.buffer.size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hive.exec.script.maxerrsize</name><value>100000</value><source>programatically</source></property>
<property><name>mapred.map.tasks.speculative.execution</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authorization.createtable.owner.grants</name><value/><source>programatically</source></property>
<property><name>hive.hadoop.supports.splittable.combineinputformat</name><value>false</value><source>programatically</source></property>
<property><name>hive.table.name</name><value/><source>programatically</source></property>
<property><name>mapred.map.child.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
<property><name>kfs.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>hive.exec.perf.logger</name><value>org.apache.hadoop.hive.ql.log.PerfLogger</value><source>programatically</source></property>
<property><name>hive.exec.mode.local.auto.input.files.max</name><value>4</value><source>programatically</source></property>
<property><name>mapred.map.max.attempts</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authorization.createtable.user.grants</name><value/><source>programatically</source></property>
<property><name>kfs.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>mapred.job.shuffle.merge.percent</name><value>0.66</value><source>mapred-default.xml</source></property>
<property><name>hive.metastore.execute.setugi</name><value>false</value><source>programatically</source></property>
<property><name>fs.har.impl</name><value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value><source>programatically</source></property>
<property><name>hadoop.security.authentication</name><value>simple</value><source>core-default.xml</source></property>
<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value><source>core-default.xml</source></property>
<property><name>io.compression.codec.lzo.class</name><value>com.hadoop.compression.lzo.LzoCodec</value><source>core-site.xml</source></property>
<property><name>mapred.skip.reduce.auto.incr.proc.count</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.tracker.jobhistory.lru.cache.size</name><value>5</value><source>mapred-default.xml</source></property>
<property><name>hive.metastore.partition.inherit.table.properties</name><value/><source>programatically</source></property>
<property><name>hive.partition.name</name><value/><source>programatically</source></property>
<property><name>hive.optimize.groupby</name><value>true</value><source>programatically</source></property>
<property><name>hive.exec.tasklog.debug.timeout</name><value>20000</value><source>programatically</source></property>
<property><name>tfile.fs.input.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
<property><name>mapreduce.job.acl-view-job</name><value> </value><source>mapred-default.xml</source></property>
<property><name>javax.jdo.option.NonTransactionalRead</name><value>true</value><source>programatically</source></property>
<property><name>mapred.job.queue.name</name><value>default</value><source>mapred-default.xml</source></property>
<property><name>hive.stats.atomic</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.connect.retries</name><value>3</value><source>programatically</source></property>
<property><name>hive.stats.default.publisher</name><value/><source>programatically</source></property>
<property><name>ftp.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hive.map.groupby.sorted</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.post.hooks</name><value/><source>programatically</source></property>
<property><name>mapred.job.tracker.persist.jobstatus.hours</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hive.warehouse.subdir.inherit.perms</name><value>false</value><source>programatically</source></property>
<property><name>hive.metadata.move.exported.metadata.to.trash</name><value>true</value><source>programatically</source></property>
<property><name>hive.exec.dynamic.partition</name><value>true</value><source>programatically</source></property>
<property><name>hive.stats.dbconnectionstring</name><value>jdbc:derby:;databaseName=TempStatsStore;create=true</value><source>programatically</source></property>
<property><name>mapreduce.tasktracker.cache.local.numberdirectories</name><value>10000</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authenticator.manager</name><value>org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator</value><source>programatically</source></property>
<property><name>hive.client.stats.counters</name><value/><source>programatically</source></property>
<property><name>ipc.client.kill.max</name><value>10</value><source>core-default.xml</source></property>
<property><name>mapred.healthChecker.script.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
<property><name>hive.lock.sleep.between.retries</name><value>60</value><source>programatically</source></property>
<property><name>hive.exec.reducers.max</name><value>999</value><source>programatically</source></property>
<property><name>mapred.tasktracker.map.tasks.maximum</name><value>12</value><source>mapred-site.xml</source></property>
<property><name>hive.limit.optimize.limit.file</name><value>10</value><source>programatically</source></property>
<property><name>hive.jobname.length</name><value>50</value><source>programatically</source></property>
<property><name>datanucleus.cache.level2</name><value>false</value><source>programatically</source></property>
<property><name>jobclient.completion.poll.interval</name><value>5000</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.tracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value><source>mapred-default.xml</source></property>
<property><name>hive.mapred.supports.subdirectories</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.ssl.port</name><value>50443</value><source>mapred-default.xml</source></property>
<property><name>kfs.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>mapred.reduce.slowstart.completed.maps</name><value>0.05</value><source>mapred-default.xml</source></property>
<property><name>hadoop.http.filter.initializers</name><value>org.apache.hadoop.http.lib.StaticUserWebFilter</value><source>core-default.xml</source></property>
<property><name>hive.security.authorization.enabled</name><value>false</value><source>programatically</source></property>
<property><name>io.sort.mb</name><value>100</value><source>mapred-default.xml</source></property>
<property><name>hive.enforce.sortmergebucketmapjoin</name><value>false</value><source>programatically</source></property>
<property><name>hive.alias</name><value/><source>programatically</source></property>
<property><name>hive.cli.prompt</name><value>hive</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.type</name><value>simple</value><source>core-default.xml</source></property>
<property><name>hive.optimize.index.autoupdate</name><value>false</value><source>programatically</source></property>
<property><name>ipc.server.listen.queue.size</name><value>128</value><source>core-default.xml</source></property>
<property><name>file.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hive.metastore.authorization.storage.checks</name><value>false</value><source>programatically</source></property>
<property><name>hive.script.operator.truncate.env</name><value>false</value><source>programatically</source></property>
<property><name>hive.insert.into.external.tables</name><value>true</value><source>programatically</source></property>
<property><name>hive.script.operator.id.env.var</name><value>HIVE_SCRIPT_OPERATOR_ID</value><source>programatically</source></property>
<property><name>io.mapfile.bloom.size</name><value>1048576</value><source>core-default.xml</source></property>
<property><name>hive.metastore.rawstore.impl</name><value>org.apache.hadoop.hive.metastore.ObjectStore</value><source>programatically</source></property>
<property><name>hive.stats.retries.max</name><value>0</value><source>programatically</source></property>
<property><name>ftp.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>mapred.child.java.opts</name><value>-Xmx3048m</value><source>mapred-site.xml</source></property>
<property><name>hive.server.read.socket.timeout</name><value>10</value><source>programatically</source></property>
<property><name>mapred.queue.default.state</name><value>RUNNING</value><source>mapred-default.xml</source></property>
<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.dynamic.partition.mode</name><value>strict</value><source>programatically</source></property>
<property><name>mapred.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authorization.createtable.role.grants</name><value/><source>programatically</source></property>
<property><name>hadoop.util.hash.type</name><value>murmur</value><source>core-default.xml</source></property>
<property><name>hive.fetch.output.serde</name><value>org.apache.hadoop.hive.serde2.DelimitedJSONSerDe</value><source>programatically</source></property>
<property><name>hive.cluster.delegation.token.store.class</name><value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value><source>programatically</source></property>
<property><name>mapred.output.compression.type</name><value>RECORD</value><source>mapred-default.xml</source></property>
<property><name>hive.sample.seednumber</name><value>0</value><source>programatically</source></property>
<property><name>mapred.skip.attempts.to.start.skipping</name><value>2</value><source>mapred-default.xml</source></property>
<property><name>kfs.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>ha.zookeeper.acl</name><value>world:anyone:rwcda</value><source>core-default.xml</source></property>
<property><name>hive.metastore.fs.handler.class</name><value>org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl</value><source>programatically</source></property>
<property><name>io.map.index.skip</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hive.enforce.sorting</name><value>false</value><source>programatically</source></property>
<property><name>hive.security.metastore.authorization.manager</name><value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider</value><source>programatically</source></property>
<property><name>hive.hashtable.loadfactor</name><value>0.75</value><source>programatically</source></property>
<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value><source>core-default.xml</source></property>
<property><name>hive.metastore.server.tcp.keepalive</name><value>true</value><source>programatically</source></property>
<property><name>hive.exec.failure.hooks</name><value/><source>programatically</source></property>
<property><name>mapred.cluster.max.map.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>fs.s3.maxRetries</name><value>4</value><source>core-default.xml</source></property>
<property><name>s3native.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>mapred.task.tracker.task-controller</name><value>org.apache.hadoop.mapred.DefaultTaskController</value><source>mapred-default.xml</source></property>
<property><name>mapred.userlog.limit.kb</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hadoop.http.staticuser.user</name><value>dr.who</value><source>core-default.xml</source></property>
<property><name>hive.script.auto.progress</name><value>false</value><source>programatically</source></property>
<property><name>ha.zookeeper.quorum</name><value>rc01m01:2181,rc01m02:2181,rc01m03:2181</value><source>core-site.xml</source></property>
<property><name>hive.metastore.pre.event.listeners</name><value/><source>programatically</source></property>
<property><name>mapreduce.ifile.readahead.bytes</name><value>4194304</value><source>mapred-default.xml</source></property>
<property><name>hive.mapred.local.mem</name><value>0</value><source>programatically</source></property>
<property><name>hive.metastore.kerberos.principal</name><value>hive-metastore/_HOST@EXAMPLE.COM</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value><source>core-default.xml</source></property>
<property><name>hive.optimize.ppd</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value><source>core-default.xml</source></property>
<property><name>mapred.input.dir.recursive</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.plan</name><value/><source>programatically</source></property>
<property><name>hive.server.tcp.keepalive</name><value>true</value><source>programatically</source></property>
<property><name>hive.exec.parallel</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.ds.connection.url.hook</name><value/><source>programatically</source></property>
<property><name>hive.exec.max.created.files</name><value>100000</value><source>programatically</source></property>
<property><name>fs.automatic.close</name><value>true</value><source>core-default.xml</source></property>
<property><name>hive.stats.jdbcdriver</name><value>org.apache.derby.jdbc.EmbeddedDriver</value><source>programatically</source></property>
<property><name>mapred.map.tasks</name><value>2</value><source>mapred-default.xml</source></property>
<property><name>hive.script.recordreader</name><value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value><source>programatically</source></property>
<property><name>mapred.local.dir.minspacekill</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hive.security.authorization.createtable.group.grants</name><value/><source>programatically</source></property>
<property><name>mapred.job.map.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>hive.index.compact.binary.search</name><value>true</value><source>programatically</source></property>
<property><name>hive.mergejob.maponly</name><value>true</value><source>programatically</source></property>
<property><name>mapred.jobtracker.completeuserjobs.maximum</name><value>100</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.jobtracker.split.metainfo.maxsize</name><value>10000000</value><source>mapred-default.xml</source></property>
<property><name>hive.auto.progress.timeout</name><value>0</value><source>programatically</source></property>
<property><name>hive.optimize.index.filter.compact.maxsize</name><value>-1</value><source>programatically</source></property>
<property><name>jobclient.progress.monitor.poll.interval</name><value>1000</value><source>mapred-default.xml</source></property>
<property><name>javax.jdo.option.ConnectionPassword</name><value>mine</value><source>programatically</source></property>
<property><name>ftp.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hive.mapjoin.cache.numrows</name><value>25000</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.attr.member</name><value>member</value><source>core-default.xml</source></property>
<property><name>hive.metastore.archive.intermediate.original</name><value>_INTERMEDIATE_ORIGINAL</value><source>programatically</source></property>
<property><name>mapred.min.split.size</name><value>1</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.token.validity</name><value>36000</value><source>core-default.xml</source></property>
<property><name>hive.groupby.mapaggr.checkinterval</name><value>100000</value><source>programatically</source></property>
<property><name>mapred.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
<property><name>hive.metastore.cache.pinobjtypes</name><value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value><source>programatically</source></property>
<property><name>mapred.cluster.max.reduce.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>mapred.cluster.reduce.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>s3native.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>mapred.task.profile</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapred.reduce.parallel.copies</name><value>5</value><source>mapred-default.xml</source></property>
<property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value><source>core-default.xml</source></property>
<property><name>hive.stats.collect.rawdatasize</name><value>true</value><source>programatically</source></property>
<property><name>hive.exec.local.scratchdir</name><value>/tmp/hive</value><source>programatically</source></property>
<property><name>datanucleus.plugin.pluginRegistryBundleCheck</name><value>LOG</value><source>programatically</source></property>
<property><name>hive.exec.rowoffset</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.server.max.threads</name><value>100000</value><source>programatically</source></property>
<property><name>local.cache.size</name><value>10737418240</value><source>mapred-default.xml</source></property>
<property><name>hive.hwi.listen.host</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>hive.exec.reducers.bytes.per.reducer</name><value>1000000000</value><source>programatically</source></property>
<property><name>hive.exec.mode.local.auto</name><value>false</value><source>programatically</source></property>
<property><name>io.sort.factor</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.script.trust</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.job.debug.capture.stacktraces</name><value>true</value><source>programatically</source></property>
<property><name>hive.downloaded.resources.dir</name><value>/tmp/hive/hive_resources</value><source>programatically</source></property>
<property><name>hive.metastore.force.reload.conf</name><value>false</value><source>programatically</source></property>
<property><name>kfs.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hive.aux.jars.path</name><value/><source>programatically</source></property>
<property><name>mapred.task.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
<property><name>hive.archive.enabled</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.kerberos.keytab.file</name><value/><source>programatically</source></property>
<property><name>hive.metastore.client.connect.retry.delay</name><value>1</value><source>programatically</source></property>
<property><name>hive.zookeeper.quorum</name><value/><source>programatically</source></property>
<property><name>datanucleus.autoCreateSchema</name><value>true</value><source>programatically</source></property>
<property><name>hive.querylog.enable.plan.progress</name><value>true</value><source>programatically</source></property>
<property><name>hive.semantic.analyzer.hook</name><value/><source>programatically</source></property>
<property><name>hive.lock.manager</name><value>org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager</value><source>programatically</source></property>
<property><name>hive.exec.submitviachild</name><value>false</value><source>programatically</source></property>
<property><name>hive.entity.separator</name><value>@</value><source>programatically</source></property>
<property><name>hive.stats.ndv.error</name><value>20.0</value><source>programatically</source></property>
<property><name>hive.merge.mapfiles</name><value>true</value><source>programatically</source></property>
<property><name>ipc.client.idlethreshold</name><value>4000</value><source>core-default.xml</source></property>
<property><name>hive.exec.concatenate.check.index</name><value>true</value><source>programatically</source></property>
<property><name>ipc.server.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
<property><name>ftp.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hive.zookeeper.clean.extra.nodes</name><value>false</value><source>programatically</source></property>
<property><name>hive.support.concurrency</name><value>false</value><source>programatically</source></property>
<property><name>s3.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hive.exec.default.partition.name</name><value>__HIVE_DEFAULT_PARTITION__</value><source>programatically</source></property>
<property><name>hive.cli.errors.ignore</name><value>false</value><source>programatically</source></property>
<property><name>hive.debug.localtask</name><value>false</value><source>programatically</source></property>
<property><name>mapred.heartbeats.in.second</name><value>100</value><source>mapred-default.xml</source></property>
<property><name>fs.s3.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hive.unlock.numretries</name><value>10</value><source>programatically</source></property>
<property><name>hive.exec.counters.pull.interval</name><value>1000</value><source>programatically</source></property>
<property><name>mapred.map.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
<property><name>hadoop.rpc.protection</name><value>authentication</value><source>core-default.xml</source></property>
<property><name>mapred.task.cache.levels</name><value>2</value><source>mapred-default.xml</source></property>
<property><name>hive.groupby.skewindata</name><value>false</value><source>programatically</source></property>
<property><name>mapred.tasktracker.dns.interface</name><value>default</value><source>mapred-default.xml</source></property>
<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value><source>core-site.xml</source></property>
<property><name>ftp.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>fs.defaultFS</name><value>hdfs://hadoop-cluster</value><source>core-site.xml</source></property>
<property><name>hive.mapred.partitioner</name><value>org.apache.hadoop.hive.ql.io.DefaultHivePartitioner</value><source>programatically</source></property>
<property><name>datanucleus.cache.level2.type</name><value>none</value><source>programatically</source></property>
<property><name>hive.metastore.metadb.dir</name><value/><source>programatically</source></property>
<property><name>file.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>mapred.job.reduce.memory.mb</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>hive.zookeeper.client.port</name><value>2181</value><source>programatically</source></property>
<property><name>mapred.max.tracker.failures</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>fs.trash.checkpoint.interval</name><value>60</value><source>core-site.xml</source></property>
<property><name>hadoop.http.authentication.signature.secret.file</name><value>${user.home}/hadoop-http-auth-signature-secret</value><source>core-default.xml</source></property>
<property><name>hive.metastore.failure.retries</name><value>1</value><source>programatically</source></property>
<property><name>s3native.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hive.mapjoin.check.memory.rows</name><value>100000</value><source>programatically</source></property>
<property><name>mapreduce.reduce.shuffle.read.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
<property><name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name><value>5000</value><source>mapred-default.xml</source></property>
<property><name>hive.index.compact.file.ignore.hdfs</name><value>false</value><source>programatically</source></property>
<property><name>hive.metastore.partition.name.whitelist.pattern</name><value/><source>programatically</source></property>
<property><name>hive.stats.dbclass</name><value>jdbc:derby</value><source>programatically</source></property>
<property><name>hive.task.progress</name><value>false</value><source>programatically</source></property>
<property><name>datanucleus.storeManagerType</name><value>rdbms</value><source>programatically</source></property>
<property><name>mapred.max.tracker.blacklists</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.pre.hooks</name><value/><source>programatically</source></property>
<property><name>hive.session.id</name><value/><source>programatically</source></property>
<property><name>hive.optimize.cp</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.common.configuration.version</name><value>0.23.0</value><source>core-default.xml</source></property>
<property><name>jobclient.output.filter</name><value>FAILED</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.ssl</name><value>false</value><source>core-default.xml</source></property>
<property><name>mapreduce.ifile.readahead</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value><source>core-default.xml</source></property>
<property><name>hive.security.metastore.authenticator.manager</name><value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value><source>programatically</source></property>
<property><name>hive.metastore.event.expiry.duration</name><value>0</value><source>programatically</source></property>
<property><name>fs.df.interval</name><value>60000</value><source>core-default.xml</source></property>
<property><name>hive.exim.uri.scheme.whitelist</name><value>hdfs,pfile</value><source>programatically</source></property>
<property><name>javax.jdo.option.Multithreaded</name><value>true</value><source>programatically</source></property>
<property><name>hive.table.parameters.default</name><value/><source>programatically</source></property>
<property><name>io.seqfile.compress.blocksize</name><value>1000000</value><source>core-default.xml</source></property>
<property><name>mapred.jobtracker.taskScheduler</name><value>org.apache.hadoop.mapred.FairScheduler</value><source>mapred-site.xml</source></property>
<property><name>job.end.retry.attempts</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>ipc.client.connect.max.retries</name><value>10</value><source>core-default.xml</source></property>
<property><name>hadoop.security.groups.cache.secs</name><value>300</value><source>core-default.xml</source></property>
<property><name>hive.optimize.index.groupby</name><value>false</value><source>programatically</source></property>
<property><name>hive.conf.validation</name><value>true</value><source>programatically</source></property>
<property><name>hive.map.aggr</name><value>true</value><source>programatically</source></property>
<property><name>mapred.tasktracker.indexcache.mb</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.filter.user</name><value>(&amp;(objectClass=user)(sAMAccountName={0}))</value><source>core-default.xml</source></property>
<property><name>mapreduce.reduce.input.limit</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>hive.test.mode.samplefreq</name><value>32</value><source>programatically</source></property>
<property><name>datanucleus.validateTables</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.scratchdir</name><value>/tmp/hive-hive</value><source>programatically</source></property>
<property><name>hive.optimize.bucketmapjoin.sortedmerge</name><value>false</value><source>programatically</source></property>
<property><name>tasktracker.http.threads</name><value>40</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.max.dynamic.partitions.pernode</name><value>100</value><source>programatically</source></property>
<property><name>fs.s3n.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>mapred.job.tracker.handler.count</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>hive.ddl.createtablelike.properties.whitelist</name><value/><source>programatically</source></property>
<property><name>fs.ftp.host</name><value>0.0.0.0</value><source>core-default.xml</source></property>
<property><name>keep.failed.task.files</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapred.output.compress</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value><source>core-default.xml</source></property>
<property><name>mapred.jobtracker.job.history.block.size</name><value>3145728</value><source>mapred-default.xml</source></property>
<property><name>hive.metastore.archive.intermediate.extracted</name><value>_INTERMEDIATE_EXTRACTED</value><source>programatically</source></property>
<property><name>mapred.skip.reduce.max.skip.groups</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hive.stats.autogather</name><value>true</value><source>programatically</source></property>
<property><name>hive.rework.mapredwork</name><value>false</value><source>programatically</source></property>
<property><name>hive.exec.script.allow.partial.consumption</name><value>false</value><source>programatically</source></property>
<property><name>file.replication</name><value>1</value><source>core-default.xml</source></property>
<property><name>hive.autogen.columnalias.prefix.label</name><value>_c</value><source>programatically</source></property>
<property><name>hive.auto.convert.join</name><value>false</value><source>programatically</source></property>
<property><name>hadoop.work.around.non.threadsafe.getpwuid</name><value>false</value><source>core-default.xml</source></property>
<property><name>hive.exec.compress.output</name><value>false</value><source>programatically</source></property>
<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value><source>core-site.xml</source></property>
<property><name>mapred.line.input.format.linespermap</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>hadoop.kerberos.kinit.command</name><value>kinit</value><source>core-default.xml</source></property>
<property><name>hive.internal.ddl.list.bucketing.enable</name><value>false</value><source>programatically</source></property>
<property><name>mapred.min.split.size.per.node</name><value>1</value><source>programatically</source></property>
<property><name>file.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hive.metastore.sasl.enabled</name><value>false</value><source>programatically</source></property>
<property><name>mapred.local.dir.minspacestart</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>mapred.jobtracker.maxtasks.per.job</name><value>-1</value><source>mapred-default.xml</source></property>
<property><name>hive.hbase.wal.enabled</name><value>true</value><source>programatically</source></property>
<property><name>hive.metastore.event.listeners</name><value/><source>programatically</source></property>
<property><name>hive.hashtable.initialCapacity</name><value>100000</value><source>programatically</source></property>
<property><name>hive.test.mode</name><value>false</value><source>programatically</source></property>
<property><name>mapred.user.jobconf.limit</name><value>5242880</value><source>mapred-default.xml</source></property>
<property><name>mapred.reduce.max.attempts</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>hive.stats.retries.wait</name><value>3000</value><source>programatically</source></property>
<property><name>hive.added.jars.path</name><value/><source>programatically</source></property>
<property><name>net.topology.script.number.args</name><value>100</value><source>core-default.xml</source></property>
<property><name>hive.optimize.bucketmapjoin</name><value>false</value><source>programatically</source></property>
<property><name>datanucleus.validateColumns</name><value>false</value><source>programatically</source></property>
<property><name>mapred.job.tracker</name><value>rc01m02:8021</value><source>mapred-site.xml</source></property>
<property><name>hive.mapjoin.bucket.cache.size</name><value>100</value><source>programatically</source></property>
<property><name>hadoop.ssl.hostname.verifier</name><value>DEFAULT</value><source>core-default.xml</source></property>
<property><name>hive.metastore.client.socket.timeout</name><value>20</value><source>programatically</source></property>
<property><name>mapred.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value><source>mapred-default.xml</source></property>
<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value><source>core-default.xml</source></property>
<property><name>mapred.tasktracker.expiry.interval</name><value>600000</value><source>mapred-default.xml</source></property>
<property><name>hadoop.proxyuser.hue.groups</name><value>*</value><source>core-site.xml</source></property>
<property><name>io.sort.record.percent</name><value>0.05</value><source>mapred-default.xml</source></property>
<property><name>hive.exec.list.bucketing.default.dir</name><value>HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME</value><source>programatically</source></property>
<property><name>hive.lock.mapred.only.operation</name><value>false</value><source>programatically</source></property>
<property><name>hive.map.aggr.hash.percentmemory</name><value>0.5</value><source>programatically</source></property>
<property><name>hive.hmshandler.retry.attempts</name><value>1</value><source>programatically</source></property>
<property><name>mapred.job.tracker.persist.jobstatus.active</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>hive.merge.rcfile.block.level</name><value>true</value><source>programatically</source></property>
<property><name>io.seqfile.local.dir</name><value>${hadoop.tmp.dir}/io/local</value><source>core-default.xml</source></property>
<property><name>hive.metastore.uris</name><value/><source>programatically</source></property>
<property><name>tfile.io.chunk.size</name><value>1048576</value><source>core-default.xml</source></property>
<property><name>file.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hive.autogen.columnalias.prefix.includefuncname</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.job.acl-modify-job</name><value> </value><source>mapred-default.xml</source></property>
<property><name>hive.exec.compress.intermediate</name><value>false</value><source>programatically</source></property>
<property><name>hive.fetch.task.conversion</name><value>minimal</value><source>programatically</source></property>
<property><name>hive.lock.numretries</name><value>100</value><source>programatically</source></property>
<property><name>hive.variable.substitute.depth</name><value>40</value><source>programatically</source></property>
<property><name>hive.default.fileformat</name><value>TextFile</value><source>programatically</source></property>
<property><name>hive.metastore.end.function.listeners</name><value/><source>programatically</source></property>
<property><name>io.skip.checksum.errors</name><value>false</value><source>core-default.xml</source></property>
<property><name>fs.ftp.host.port</name><value>21</value><source>core-default.xml</source></property>
<property><name>fs.AbstractFileSystem.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFs</value><source>core-default.xml</source></property>
<property><name>mapred.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value><source>mapred-default.xml</source></property>
<property><name>hive.fileformat.check</name><value>true</value><source>programatically</source></property>
<property><name>hive.merge.smallfiles.avgsize</name><value>16000000</value><source>programatically</source></property>
<property><name>hive.metadata.export.location</name><value/><source>programatically</source></property>
<property><name>hive.query.string</name><value/><source>programatically</source></property>
<property><name>hadoop.ssl.require.client.cert</name><value>false</value><source>core-default.xml</source></property>
<property><name>mapred.job.reduce.input.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
</configuration>
hive-run.log
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209
Starting Hive Thrift Server
This usage has been deprecated, consider using the new command line syntax (run with -h to see usage information)
WARNING: org.apache.hadoop.metrics.jvm.EventCounter is deprecated. Please use org.apache.hadoop.log.metrics.EventCounter in all the log4j.properties files.
13/02/25 09:54:41 WARN conf.HiveConf: DEPRECATED: Configuration property hive.metastore.local no longer has any effect. Make sure to provide a valid value for hive.metastore.uris if you are connecting to a remote metastore.
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/lib/zookeeper/lib/slf4j-log4j12-1.6.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/var/lib/hive/hive-0.10.0-bin/lib/slf4j-log4j12-1.6.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
13/02/25 09:54:41 INFO service.HiveServer: Starting hive server on port 11000 with 100 min worker threads and 2147483647 max worker threads
13/02/25 09:54:41 INFO service.HiveServer: TCP keepalive = true
13/02/25 09:54:49 INFO metastore.HiveMetaStore: 0: Opening raw store with implemenation class:org.apache.hadoop.hive.metastore.ObjectStore
13/02/25 09:54:49 INFO metastore.ObjectStore: ObjectStore, initialize called
13/02/25 09:54:51 WARN conf.HiveConf: DEPRECATED: Configuration property hive.metastore.local no longer has any effect. Make sure to provide a valid value for hive.metastore.uris if you are connecting to a remote metastore.
13/02/25 09:54:51 INFO metastore.ObjectStore: Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes="Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order"
13/02/25 09:54:51 INFO metastore.ObjectStore: Initialized ObjectStore
Hive history file=/tmp/hive/hive_job_log_hive_201302250954_1991464361.txt
13/02/25 09:54:53 INFO exec.HiveHistory: Hive history file=/tmp/hive/hive_job_log_hive_201302250954_1991464361.txt
13/02/25 09:54:53 WARN conf.HiveConf: DEPRECATED: Configuration property hive.metastore.local no longer has any effect. Make sure to provide a valid value for hive.metastore.uris if you are connecting to a remote metastore.
13/02/25 09:54:53 INFO service.HiveServer: Putting temp output to file /tmp/hive/hive_2013022509543635105834228218346.pipeout
13/02/25 09:54:53 INFO service.HiveServer: Running the query: set hive.fetch.output.serde = org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
13/02/25 09:54:53 INFO service.HiveServer: Putting temp output to file /tmp/hive/hive_2013022509543635105834228218346.pipeout
13/02/25 09:55:02 INFO service.HiveServer: Running the query: use hdp_sand
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=Driver.run>
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=TimeToSubmit>
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=compile>
13/02/25 09:55:02 INFO parse.ParseDriver: Parsing command: use hdp_sand
13/02/25 09:55:02 INFO parse.ParseDriver: Parse Completed
13/02/25 09:55:02 INFO ql.Driver: Semantic Analysis Completed
13/02/25 09:55:02 INFO ql.Driver: Returning Hive schema: Schema(fieldSchemas:null, properties:null)
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=doAuthorization>
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=doAuthorization start=1361782502704 end=1361782502704 duration=0>
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=compile start=1361782502525 end=1361782502704 duration=179>
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=Driver.execute>
13/02/25 09:55:02 INFO ql.Driver: Starting command: use hdp_sand
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=TimeToSubmit start=1361782502525 end=1361782502714 duration=189>
13/02/25 09:55:02 INFO metastore.HiveMetaStore: 0: get_database: hdp_sand
13/02/25 09:55:02 INFO HiveMetaStore.audit: ugi=hive ip=unknown-ip-addr cmd=get_database: hdp_sand
13/02/25 09:55:02 INFO metastore.HiveMetaStore: 0: Opening raw store with implemenation class:org.apache.hadoop.hive.metastore.ObjectStore
13/02/25 09:55:02 INFO metastore.ObjectStore: ObjectStore, initialize called
13/02/25 09:55:02 INFO metastore.ObjectStore: Initialized ObjectStore
13/02/25 09:55:02 INFO metastore.HiveMetaStore: 0: get_database: hdp_sand
13/02/25 09:55:02 INFO HiveMetaStore.audit: ugi=hive ip=unknown-ip-addr cmd=get_database: hdp_sand
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=Driver.execute start=1361782502704 end=1361782502789 duration=85>
OK
13/02/25 09:55:02 INFO ql.Driver: OK
13/02/25 09:55:02 INFO ql.Driver: <PERFLOG method=releaseLocks>
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=releaseLocks start=1361782502790 end=1361782502790 duration=0>
13/02/25 09:55:02 INFO ql.Driver: </PERFLOG method=Driver.run start=1361782502525 end=1361782502790 duration=265>
13/02/25 09:55:02 INFO service.HiveServer: Returning schema: Schema(fieldSchemas:null, properties:null)
13/02/25 09:55:14 INFO service.HiveServer: Running the query: select count(*) from page_view
13/02/25 09:55:14 INFO ql.Driver: <PERFLOG method=Driver.run>
13/02/25 09:55:14 INFO ql.Driver: <PERFLOG method=TimeToSubmit>
13/02/25 09:55:14 INFO ql.Driver: <PERFLOG method=compile>
13/02/25 09:55:14 INFO parse.ParseDriver: Parsing command: select count(*) from page_view
13/02/25 09:55:14 INFO parse.ParseDriver: Parse Completed
13/02/25 09:55:15 INFO parse.SemanticAnalyzer: Starting Semantic Analysis
13/02/25 09:55:15 INFO parse.SemanticAnalyzer: Completed phase 1 of Semantic Analysis
13/02/25 09:55:15 INFO parse.SemanticAnalyzer: Get metadata for source tables
13/02/25 09:55:15 INFO metastore.HiveMetaStore: 0: get_table : db=hdp_sand tbl=page_view
13/02/25 09:55:15 INFO HiveMetaStore.audit: ugi=hive ip=unknown-ip-addr cmd=get_table : db=hdp_sand tbl=page_view
13/02/25 09:55:15 INFO parse.SemanticAnalyzer: Get metadata for subqueries
13/02/25 09:55:15 INFO parse.SemanticAnalyzer: Get metadata for destination tables
13/02/25 09:55:16 INFO parse.SemanticAnalyzer: Completed getting MetaData in Semantic Analysis
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for FS(6)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for SEL(5)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for GBY(4)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for RS(3)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for GBY(2)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for SEL(1)
13/02/25 09:55:16 INFO ppd.OpProcFactory: Processing for TS(0)
13/02/25 09:55:16 INFO metastore.HiveMetaStore: 0: get_partitions_with_auth : db=hdp_sand tbl=page_view
13/02/25 09:55:16 INFO HiveMetaStore.audit: ugi=hive ip=unknown-ip-addr cmd=get_partitions_with_auth : db=hdp_sand tbl=page_view
13/02/25 09:55:17 INFO physical.MetadataOnlyOptimizer: Looking for table scans where optimization is applicable
13/02/25 09:55:17 INFO physical.MetadataOnlyOptimizer: Found 0 metadata only table scans
13/02/25 09:55:17 INFO parse.SemanticAnalyzer: Completed plan generation
13/02/25 09:55:17 INFO ql.Driver: Semantic Analysis Completed
13/02/25 09:55:17 INFO exec.ListSinkOperator: Initializing Self 7 OP
13/02/25 09:55:17 INFO exec.ListSinkOperator: Operator 7 OP initialized
13/02/25 09:55:17 INFO exec.ListSinkOperator: Initialization Done 7 OP
13/02/25 09:55:17 INFO ql.Driver: Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c0, type:bigint, comment:null)], properties:null)
13/02/25 09:55:17 INFO ql.Driver: <PERFLOG method=doAuthorization>
13/02/25 09:55:17 INFO metastore.HiveMetaStore: 0: get_database: hdp_sand
13/02/25 09:55:17 INFO HiveMetaStore.audit: ugi=hive ip=unknown-ip-addr cmd=get_database: hdp_sand
13/02/25 09:55:17 INFO ql.Driver: </PERFLOG method=doAuthorization start=1361782517411 end=1361782517580 duration=169>
13/02/25 09:55:17 INFO ql.Driver: </PERFLOG method=compile start=1361782514977 end=1361782517580 duration=2603>
13/02/25 09:55:17 INFO ql.Driver: <PERFLOG method=Driver.execute>
13/02/25 09:55:17 INFO ql.Driver: Starting command: select count(*) from page_view
Total MapReduce jobs = 1
13/02/25 09:55:17 INFO ql.Driver: Total MapReduce jobs = 1
13/02/25 09:55:17 INFO ql.Driver: </PERFLOG method=TimeToSubmit start=1361782514977 end=1361782517590 duration=2613>
Launching Job 1 out of 1
13/02/25 09:55:17 INFO ql.Driver: Launching Job 1 out of 1
13/02/25 09:55:17 WARN conf.HiveConf: DEPRECATED: Configuration property hive.metastore.local no longer has any effect. Make sure to provide a valid value for hive.metastore.uris if you are connecting to a remote metastore.
Number of reduce tasks determined at compile time: 1
13/02/25 09:55:17 INFO exec.Task: Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
13/02/25 09:55:17 INFO exec.Task: In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
13/02/25 09:55:17 INFO exec.Task: set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
13/02/25 09:55:17 INFO exec.Task: In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
13/02/25 09:55:17 INFO exec.Task: set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
13/02/25 09:55:17 INFO exec.Task: In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
13/02/25 09:55:17 INFO exec.Task: set mapred.reduce.tasks=<number>
13/02/25 09:55:17 INFO exec.ExecDriver: Using org.apache.hadoop.hive.ql.io.CombineHiveInputFormat
13/02/25 09:55:17 INFO exec.ExecDriver: adding libjars: file:///var/lib/hive/hive-0.10.0-bin/lib/hive-builtins-0.10.0.jar,file:///usr/lib/hive/lib/hive-hbase-handler-0.9.0-cdh4.1.2.jar,file:///usr/lib/hive/lib/hbase.jar,file:///usr/lib/hive/lib/zookeeper.jar,file:///usr/lib/hive/lib/guava-11.0.2.jar
13/02/25 09:55:17 INFO exec.ExecDriver: Processing alias page_view
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2012-02-05
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2012-02-05
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2012-02-06
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2012-02-06
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-03
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-03
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-04
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-04
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-05
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-05
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-06
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-06
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-07
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-07
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-08
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-08
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-09
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-09
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-10
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-10
13/02/25 09:55:17 INFO exec.ExecDriver: Adding input file hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-11
13/02/25 09:55:17 INFO exec.Utilities: Content Summary not cached for hdfs://hadoop-cluster/user/hive/hdp_sand/page_view/dt=2013-02-11
13/02/25 09:55:17 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/1
13/02/25 09:55:17 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/2
13/02/25 09:55:17 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/3
13/02/25 09:55:17 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/4
13/02/25 09:55:17 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/5
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/6
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/7
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/8
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/9
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/10
13/02/25 09:55:18 INFO exec.ExecDriver: Changed input file to hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/11
13/02/25 09:55:19 INFO exec.ExecDriver: Making Temp Directory: hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-ext-10001
13/02/25 09:55:19 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/02/25 09:55:20 INFO lzo.GPLNativeCodeLoader: Loaded native gpl library
13/02/25 09:55:20 INFO lzo.LzoCodec: Successfully loaded & initialized native-lzo library [hadoop-lzo rev 56566259a76a3cf446ec946a6632268c7304af9f]
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit creating pool for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/1; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/1
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/2; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/2
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/3; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/3
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/4; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/4
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/5; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/5
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/6; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/6
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/7; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/7
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/8; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/8
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/9; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/9
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/10; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/10
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: CombineHiveInputSplit: pool is already created for hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/11; using filter path hdfs://hadoop-cluster/tmp/hive-hive/hive_2013-02-25_09-55-14_977_6490532976687297475/-mr-10002/11
13/02/25 09:55:20 INFO mapred.FileInputFormat: Total input paths to process : 11
13/02/25 09:55:20 INFO io.CombineHiveInputFormat: number of splits 1
Starting Job = job_201302111523_0973, Tracking URL = http://rc01m02:50030/jobdetails.jsp?jobid=job_201302111523_0973
13/02/25 09:55:27 INFO exec.Task: Starting Job = job_201302111523_0973, Tracking URL = http://rc01m02:50030/jobdetails.jsp?jobid=job_201302111523_0973
Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_201302111523_0973
13/02/25 09:55:27 INFO exec.Task: Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_201302111523_0973
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
13/02/25 09:55:30 INFO exec.Task: Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
13/02/25 09:55:30 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
2013-02-25 09:55:30,949 Stage-1 map = 0%, reduce = 0%
13/02/25 09:55:30 INFO exec.Task: 2013-02-25 09:55:30,949 Stage-1 map = 0%, reduce = 0%
13/02/25 09:56:31 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
2013-02-25 09:56:31,461 Stage-1 map = 0%, reduce = 0%
13/02/25 09:56:31 INFO exec.Task: 2013-02-25 09:56:31,461 Stage-1 map = 0%, reduce = 0%
13/02/25 09:57:02 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
2013-02-25 09:57:02,601 Stage-1 map = 100%, reduce = 100%
13/02/25 09:57:02 INFO exec.Task: 2013-02-25 09:57:02,601 Stage-1 map = 100%, reduce = 100%
13/02/25 09:57:02 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
Ended Job = job_201302111523_0973 with errors
13/02/25 09:57:02 ERROR exec.Task: Ended Job = job_201302111523_0973 with errors
Error during job, obtaining debugging information...
13/02/25 09:57:02 ERROR exec.Task: Error during job, obtaining debugging information...
Job Tracking URL: http://rc01m02:50030/jobdetails.jsp?jobid=job_201302111523_0973
13/02/25 09:57:02 ERROR exec.Task: Job Tracking URL: http://rc01m02:50030/jobdetails.jsp?jobid=job_201302111523_0973
Examining task ID: task_201302111523_0973_m_000002 (and more) from job job_201302111523_0973
13/02/25 09:57:02 ERROR exec.Task: Examining task ID: task_201302111523_0973_m_000002 (and more) from job job_201302111523_0973
Exception in thread "Thread-39" java.lang.NoClassDefFoundError: org/apache/hadoop/mapreduce/util/HostUtil
at org.apache.hadoop.hive.shims.Hadoop23Shims.getTaskAttemptLogUrl(Hadoop23Shims.java:53)
at org.apache.hadoop.hive.ql.exec.JobDebugger$TaskInfoGrabber.getTaskInfos(JobDebugger.java:186)
at org.apache.hadoop.hive.ql.exec.JobDebugger$TaskInfoGrabber.run(JobDebugger.java:142)
at java.lang.Thread.run(Thread.java:619)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.mapreduce.util.HostUtil
at java.net.URLClassLoader$1.run(URLClassLoader.java:200)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:188)
at java.lang.ClassLoader.loadClass(ClassLoader.java:307)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
at java.lang.ClassLoader.loadClass(ClassLoader.java:252)
at java.lang.ClassLoader.loadClassInternal(ClassLoader.java:320)
... 4 more
FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.MapRedTask
13/02/25 09:57:03 ERROR ql.Driver: FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.MapRedTask
13/02/25 09:57:03 INFO ql.Driver: </PERFLOG method=Driver.execute start=1361782517580 end=1361782623613 duration=106033>
MapReduce Jobs Launched:
13/02/25 09:57:03 INFO ql.Driver: MapReduce Jobs Launched:
Job 0: Map: 1 Reduce: 1 HDFS Read: 0 HDFS Write: 0 FAIL
13/02/25 09:57:03 INFO ql.Driver: Job 0: Map: 1 Reduce: 1 HDFS Read: 0 HDFS Write: 0 FAIL
Total MapReduce CPU Time Spent: 0 msec
13/02/25 09:57:03 INFO ql.Driver: Total MapReduce CPU Time Spent: 0 msec
13/02/25 09:57:03 INFO ql.Driver: <PERFLOG method=releaseLocks>
13/02/25 09:57:03 INFO ql.Driver: </PERFLOG method=releaseLocks start=1361782623616 end=1361782623616 duration=0>
hive-site.xml
XML
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93
<configuration>

  <!-- NOTE(review): DEPRECATED — per the WARN lines in hive-run.log,
       hive.metastore.local no longer has any effect in Hive 0.10.0.
       Kept for reference; configure hive.metastore.uris instead when
       connecting to a remote metastore. -->
  <property>
    <name>hive.metastore.local</name>
    <value>true</value>
  </property>

  <!-- Metastore backing database: PostgreSQL on rc01m03. -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:postgresql://rc01m03:5432/metastore?createDatabaseIfNotExist=true</value>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.postgresql.Driver</value>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hiveuser</value>
  </property>

  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hiveuser</value>
  </property>

  <!-- Fixed: this property was declared twice with identical content;
       the duplicate block has been removed. -->
  <property>
    <name>datanucleus.validateConstraints</name>
    <value>true</value>
    <description>Validates existing schema against code. Turn this on if you want to verify existing schema.</description>
  </property>

  <!-- Client-side authorization. -->
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
    <description>Enable or disable the hive client authorization.</description>
  </property>

  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>ALL</value>
    <description>The privileges automatically granted to the owner whenever a table gets created.
An example like "select,drop" will grant select and drop privilege to the owner of the table.</description>
  </property>

  <!-- HiveServer2 authentication: LDAP against the directory below. -->
  <property>
    <name>hive.server2.authentication</name>
    <value>LDAP</value>
    <description>
Client authentication types.
NONE: no authentication check
LDAP: LDAP/AD based authentication
KERBEROS: Kerberos/GSSAPI authentication
</description>
  </property>

  <property>
    <name>hive.server2.authentication.ldap.url</name>
    <value>ldap://localhost:389</value>
  </property>

  <property>
    <name>hive.server2.authentication.ldap.baseDN</name>
    <value>ou=People,dc=p4,dc=pl</value>
  </property>

  <property>
    <name>hive.exec.parallel</name>
    <value>true</value>
    <description>Whether to execute jobs in parallel</description>
  </property>

  <!-- Extra jars shipped with each job (HBase handler + its deps);
       these also show up in the "adding libjars" line of hive-run.log. -->
  <property>
    <name>hive.aux.jars.path</name>
    <value>file:///usr/lib/hive/lib/hive-hbase-handler-0.9.0-cdh4.1.2.jar,file:///usr/lib/hive/lib/hbase.jar,file:///usr/lib/hive/lib/zookeeper.jar,file:///usr/lib/hive/lib/guava-11.0.2.jar</value>
  </property>

  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>rc01m01,rc01m02,rc01m03</value>
  </property>

  <property>
    <name>hive.server.tcp.keepalive</name>
    <value>true</value>
    <description>Whether to enable TCP keepalive for the HiveServer. Keepalive will prevent accumulation of half-open connections.</description>
  </property>

</configuration>

Please sign in to comment on this gist.

Something went wrong with that request. Please try again.