<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
<!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
<!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
<!-- Hive Execution Parameters -->
<property>
<name>hive.exec.script.wrapper</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.plan</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.stagingdir</name>
<value>.hive-staging</value>
<description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results, with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/tmp/hive</value>
<description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
</property>
<property>
<name>hive.repl.rootdir</name>
<value>/user/hive/repl/</value>
<description>HDFS root dir for all replication dumps.</description>
</property>
<property>
<name>hive.repl.cm.enabled</name>
<value>false</value>
<description>Turn on ChangeManager, so deleted files will go to cmrootdir.</description>
</property>
<property>
<name>hive.repl.cmrootdir</name>
<value>/user/hive/cmroot/</value>
<description>Root dir for ChangeManager, used for deleted files.</description>
</property>
<property>
<name>hive.repl.cm.retain</name>
<value>24h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is hour if not specified.
Time to retain removed files in cmrootdir.
</description>
</property>
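<!--
  Illustrative note (not part of the generated defaults): time-valued properties such as
  hive.repl.cm.retain take a number plus one of the units listed above, so "24h", "1440m"
  and "1d" all denote the same retention period, while a bare number falls back to the
  stated default unit (hours here). A hive-site.xml override might look like:
  <property>
    <name>hive.repl.cm.retain</name>
    <value>7d</value>
  </property>
-->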
<property>
<name>hive.repl.cm.interval</name>
<value>3600s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Interval for the cmroot cleanup thread.
</description>
</property>
<property>
<name>hive.repl.replica.functions.root.dir</name>
<value>/user/hive/repl/functions/</value>
<description>Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse</description>
</property>
<property>
<name>hive.repl.approx.max.load.tasks</name>
<value>10000</value>
<description>
Provide an approximation of the maximum number of tasks that should be executed before
dynamically generating the next set of tasks. The number is approximate as Hive
will stop at a slightly higher number, the reason being some events might lead to a
task increment that would cross the specified limit.
</description>
</property>
<property>
<name>hive.repl.partitions.dump.parallelism</name>
<value>100</value>
<description>Number of threads that will be used to dump partition data information during repl dump.</description>
</property>
<property>
<name>hive.repl.dumpdir.clean.freq</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Frequency at which timer task runs to purge expired dump dirs.
</description>
</property>
<property>
<name>hive.repl.dumpdir.ttl</name>
<value>7d</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is day if not specified.
TTL of dump dirs before cleanup.
</description>
</property>
<property>
<name>hive.repl.dump.metadata.only</name>
<value>false</value>
<description>Indicates whether the replication dump contains only metadata, or data plus metadata.</description>
</property>
<property>
<name>hive.repl.dump.include.acid.tables</name>
<value>false</value>
<description>
Indicates if repl dump should include information about ACID tables. It should be
used in conjunction with 'hive.repl.dump.metadata.only' to enable copying of
metadata for acid tables which do not require the corresponding transaction
semantics to be applied on target. This can be removed when ACID table
replication is supported.
</description>
</property>
<property>
<name>hive.repl.bootstrap.dump.open.txn.timeout</name>
<value>1h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is hour if not specified.
Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. If these open transactions are not closed within the timeout value, then REPL DUMP will forcefully abort those transactions and continue with bootstrap dump.
</description>
</property>
<property>
<name>hive.repl.add.raw.reserved.namespace</name>
<value>false</value>
<description>
For TDE with same encryption keys on source and target, allow Distcp super user to access
the raw bytes from filesystem without decrypting on source and then encrypting on target.
</description>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>${system:java.io.tmpdir}/${system:user.name}</value>
<description>Local scratch space for Hive jobs</description>
</property>
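<!--
  Illustrative note (an assumption about a typical Linux deployment, not part of the generated
  defaults): the ${system:...} tokens are substituted from JVM system properties, so the default
  value above usually expands to something like /tmp/hive when the process runs as the "hive"
  user with java.io.tmpdir=/tmp. A fixed location can be set in hive-site.xml, for example:
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/var/tmp/hive-scratch</value>
  </property>
  (/var/tmp/hive-scratch is a hypothetical path chosen only for the example.)
-->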
<property>
<name>hive.downloaded.resources.dir</name>
<value>${system:java.io.tmpdir}/${hive.session.id}_resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.scratch.dir.permission</name>
<value>700</value>
<description>The permission for the user specific scratch directories that get created.</description>
</property>
<property>
<name>hive.exec.submitviachild</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.exec.submit.local.task.via.child</name>
<value>true</value>
<description>
Determines whether local tasks (typically the mapjoin hashtable generation phase) run in a
separate JVM (true recommended) or not.
Running in the same JVM avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.
</description>
</property>
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>
Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task).
This prevents runaway scripts from filling log partitions to capacity.
</description>
</property>
<property>
<name>hive.exec.script.allow.partial.consumption</name>
<value>false</value>
<description>
When enabled, this option allows a user script to exit successfully without consuming
all the data from the standard input.
</description>
</property>
<property>
<name>stream.stderr.reporter.prefix</name>
<value>reporter:</value>
<description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
</property>
<property>
<name>stream.stderr.reporter.enabled</name>
<value>true</value>
<description>Enable consumption of status and counter messages for streaming jobs.</description>
</property>
<property>
<name>hive.exec.compress.output</name>
<value>false</value>
<description>
This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed.
The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
</description>
</property>
<property>
<name>hive.exec.compress.intermediate</name>
<value>false</value>
<description>
This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
</description>
</property>
<property>
<name>hive.intermediate.compression.codec</name>
<value/>
<description/>
</property>
<property>
<name>hive.intermediate.compression.type</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>256000000</value>
<description>Size per reducer. The default is 256 MB; i.e., if the input size is 1 GB, 4 reducers will be used.</description>
</property>
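<!--
  Illustrative note (not part of the generated defaults): with the default of 256000000 bytes
  (roughly 256 MB) per reducer, a 1 GB input yields about 1000000000 / 256000000, i.e. 4 reducers,
  as the description above states. Halving the value roughly doubles the reducer count; a
  hive-site.xml override might look like:
  <property>
    <name>hive.exec.reducers.bytes.per.reducer</name>
    <value>128000000</value>
  </property>
-->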
<property>
<name>hive.exec.reducers.max</name>
<value>1009</value>
<description>
Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
</description>
</property>
<property>
<name>hive.exec.pre.hooks</name>
<value/>
<description>
Comma-separated list of pre-execution hooks to be invoked for each statement.
A pre-execution hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
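<!--
  Illustrative note (not part of the generated defaults): hooks are configured as a
  comma-separated list of class names implementing ExecuteWithHookContext. The class name below
  is hypothetical and only shows the expected shape of the value:
  <property>
    <name>hive.exec.pre.hooks</name>
    <value>com.example.hooks.AuditPreHook</value>
  </property>
-->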
<property>
<name>hive.exec.post.hooks</name>
<value/>
<description>
Comma-separated list of post-execution hooks to be invoked for each statement.
A post-execution hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
<property>
<name>hive.exec.failure.hooks</name>
<value/>
<description>
Comma-separated list of on-failure hooks to be invoked for each statement.
An on-failure hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
<property>
<name>hive.exec.query.redactor.hooks</name>
<value/>
<description>
Comma-separated list of hooks to be invoked for each query which can
transform the query before it's placed in the job.xml file. Must be a Java class which
extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class.
</description>
</property>
<property>
<name>hive.client.stats.publishers</name>
<value/>
<description>
Comma-separated list of statistics publishers to be invoked on counters on each job.
A client stats publisher is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.
</description>
</property>
<property>
<name>hive.ats.hook.queue.capacity</name>
<value>64</value>
<description>
Queue size for the ATS Hook executor. If the number of outstanding submissions
to the ATS executor exceeds this amount, the Hive ATS Hook will not try to log queries to ATS.
</description>
</property>
<property>
<name>hive.exec.parallel</name>
<value>false</value>
<description>Whether to execute jobs in parallel</description>
</property>
<property>
<name>hive.exec.parallel.thread.number</name>
<value>8</value>
<description>How many jobs at most can be executed in parallel</description>
</property>
<property>
<name>hive.mapred.reduce.tasks.speculative.execution</name>
<value>true</value>
<description>Whether speculative execution for reducers should be turned on. </description>
</property>
<property>
<name>hive.exec.counters.pull.interval</name>
<value>1000</value>
<description>
The interval at which to poll the JobTracker for the counters of the running job.
The smaller it is, the more load there will be on the JobTracker; the higher it is, the less granular the captured counter data will be.
</description>
</property>
<property>
<name>hive.exec.dynamic.partition</name>
<value>true</value>
<description>Whether or not to allow dynamic partitions in DML/DDL.</description>
</property>
<property>
<name>hive.exec.dynamic.partition.mode</name>
<value>strict</value>
<description>
In strict mode, the user must specify at least one static partition
in case the user accidentally overwrites all partitions.
In nonstrict mode all partitions are allowed to be dynamic.
</description>
</property>
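<!--
  Illustrative note (not part of the generated defaults): with the strict default, an INSERT must
  supply at least one static partition column; loads that derive every partition column from the
  data require switching to nonstrict, for example in hive-site.xml:
  <property>
    <name>hive.exec.dynamic.partition.mode</name>
    <value>nonstrict</value>
  </property>
-->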
<property>
<name>hive.exec.max.dynamic.partitions</name>
<value>1000</value>
<description>Maximum number of dynamic partitions allowed to be created in total.</description>
</property>
<property>
<name>hive.exec.max.dynamic.partitions.pernode</name>
<value>100</value>
<description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
</property>
<property>
<name>hive.exec.max.created.files</name>
<value>100000</value>
<description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
</property>
<property>
<name>hive.exec.default.partition.name</name>
<value>__HIVE_DEFAULT_PARTITION__</value>
<description>
The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped.
This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc).
The user has to be aware that the dynamic partition value should not contain this value to avoid confusion.
</description>
</property>
<property>
<name>hive.lockmgr.zookeeper.default.partition.name</name>
<value>__HIVE_DEFAULT_ZOOKEEPER_PARTITION__</value>
<description/>
</property>
<property>
<name>hive.exec.show.job.failure.debug.info</name>
<value>true</value>
<description>
If a job fails, whether to provide a link in the CLI to the task with the
most failures, along with debugging hints if applicable.
</description>
</property>
<property>
<name>hive.exec.job.debug.capture.stacktraces</name>
<value>true</value>
<description>
Whether or not stack traces parsed from the task logs of a sampled failed task
for each failed job should be stored in the SessionState
</description>
</property>
<property>
<name>hive.exec.job.debug.timeout</name>
<value>30000</value>
<description/>
</property>
<property>
<name>hive.exec.tasklog.debug.timeout</name>
<value>20000</value>
<description/>
</property>
<property>
<name>hive.output.file.extension</name>
<value/>
<description>
String used as a file extension for output files.
If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise.
</description>
</property>
<property>
<name>hive.testing.short.logs</name>
<value>false</value>
<description>Internal usage only, used only in test mode. If set to true, the short version of the operation logs (generated by LogDivertAppenderForTest) will be returned when the operation logs are requested.</description>
</property>
<property>
<name>hive.testing.remove.logs</name>
<value>true</value>
<description>Internal usage only, used only in test mode. If set to false, the operation logs and the operation log directory will not be removed, so they can be found after the test runs.</description>
</property>
<property>
<name>hive.exec.mode.local.auto</name>
<value>false</value>
<description>Let Hive determine whether to run in local mode automatically</description>
</property>
<property>
<name>hive.exec.mode.local.auto.inputbytes.max</name>
<value>134217728</value>
<description>When hive.exec.mode.local.auto is true, the total input bytes should be less than this for local mode.</description>
</property>
<property>
<name>hive.exec.mode.local.auto.input.files.max</name>
<value>4</value>
<description>When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode.</description>
</property>
<property>
<name>hive.exec.drop.ignorenonexistent</name>
<value>true</value>
<description>Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function</description>
</property>
<property>
<name>hive.ignore.mapjoin.hint</name>
<value>true</value>
<description>Ignore the mapjoin hint</description>
</property>
<property>
<name>hive.file.max.footer</name>
<value>100</value>
<description>Maximum number of footer lines a user can define for a table file</description>
</property>
<property>
<name>hive.resultset.use.unique.column.names</name>
<value>true</value>
<description>
Make column names unique in the result set by qualifying column names with table alias if needed.
Table alias will be added to column names for queries of type "select *" or
if query explicitly uses table alias "select r1.x..".
</description>
</property>
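<!--
  Illustrative note (an assumption about client-visible behaviour, not part of the generated
  defaults): with this flag enabled, a "select *" over a table aliased as t is expected to return
  result-set column labels such as t.id and t.name rather than bare id and name; setting the
  property to false keeps the unqualified names.
-->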
<property>
<name>fs.har.impl</name>
<value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
<description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20</description>
</property>
<property>
<name>hive.metastore.db.type</name>
<value>DERBY</value>
<description>
Expects one of [derby, oracle, mysql, mssql, postgres].
Type of database used by the metastore. Information schema &amp; JDBCStorageHandler depend on it.
</description>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
<description>location of default database for the warehouse</description>
</property>
<property>
<name>hive.metastore.warehouse.external.dir</name>
<value/>
<description>Default location for external tables created in the warehouse. If not set or null, then the normal warehouse location will be used as the default location.</description>
</property>
<property>
<name>hive.metastore.uris</name>
<value/>
<description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
</property>
<property>
<name>hive.metastore.uri.selection</name>
<value>RANDOM</value>
<description>
Expects one of [sequential, random].
Determines the selection mechanism used by metastore client to connect to remote metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified as part of hive.metastore.uris will be picked. RANDOM implies that the metastore will be picked randomly
</description>
</property>
<property>
<name>hive.metastore.client.capability.check</name>
<value>true</value>
<description>Whether to check client capabilities for potentially breaking API usage.</description>
</property>
<property>
<name>hive.metastore.client.cache.enabled</name>
<value>false</value>
<description>Whether to enable metastore client cache</description>
</property>
<property>
<name>hive.metastore.client.cache.expiry.time</name>
<value>120s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Expiry time for metastore client cache
</description>
</property>
<property>
<name>hive.metastore.client.cache.initial.capacity</name>
<value>50</value>
<description>Initial capacity for metastore client cache</description>
</property>
<property>
<name>hive.metastore.client.cache.max.capacity</name>
<value>50</value>
<description>Max capacity for metastore client cache</description>
</property>
<property>
<name>hive.metastore.client.cache.stats.enabled</name>
<value>false</value>
<description>Whether to enable metastore client cache stats</description>
</property>
<property>
<name>hive.metastore.fastpath</name>
<value>false</value>
<description>Used to avoid all of the proxies and object copies in the metastore. Note, if this is set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise undefined and most likely undesired behavior will result</description>
</property>
<property>
<name>hive.metastore.fshandler.threads</name>
<value>15</value>
<description>Number of threads to be allocated for metastore handler for fs operations.</description>
</property>
<property>
<name>hive.metastore.hbase.file.metadata.threads</name>
<value>1</value>
<description>Number of threads to use to read file metadata in background to cache it.</description>
</property>
<property>
<name>hive.metastore.uri.resolver</name>
<value/>
<description>If set, the fully qualified class name of the resolver for Hive metastore URIs</description>
</property>
<property>
<name>hive.metastore.connect.retries</name>
<value>3</value>
<description>Number of retries while opening a connection to metastore</description>
</property>
<property>
<name>hive.metastore.failure.retries</name>
<value>1</value>
<description>Number of retries upon failure of Thrift metastore calls</description>
</property>
<property>
<name>hive.metastore.port</name>
<value>9083</value>
<description>Hive metastore listener port</description>
</property>
<property>
<name>hive.metastore.client.connect.retry.delay</name>
<value>1s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Number of seconds for the client to wait between consecutive connection attempts
</description>
</property>
<property>
<name>hive.metastore.client.socket.timeout</name>
<value>600s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
MetaStore Client socket timeout in seconds
</description>
</property>
<property>
<name>hive.metastore.client.socket.lifetime</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
MetaStore Client socket lifetime in seconds. After this time is exceeded, client
reconnects on the next MetaStore operation. A value of 0s means the connection
has an infinite lifetime.
</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>Maxis@123</value>
<description>password to use against metastore database</description>
</property>
<property>
<name>hive.metastore.ds.connection.url.hook</name>
<value/>
<description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used</description>
</property>
<property>
<name>javax.jdo.option.Multithreaded</name>
<value>true</value>
<description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
<description>
JDBC connect string for a JDBC metastore.
To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
</description>
</property>
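<!--
  Illustrative note (not part of the generated defaults): the SSL-enabled PostgreSQL URL from the
  description above would be paired with the matching driver class, roughly:
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:postgresql://myhost/db?ssl=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.postgresql.Driver</value>
  </property>
  (myhost/db comes from the example in the description; verify the SSL flag syntax for your driver.)
-->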
<property>
<name>hive.metastore.dbaccess.ssl.properties</name>
<value/>
<description>
Comma-separated SSL properties for metastore to access database when JDO connection URL
enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd.
</description>
</property>
<property>
<name>hive.hmshandler.retry.attempts</name>
<value>10</value>
<description>The number of times to retry an HMSHandler call if there is a connection error.</description>
</property>
<property>
<name>hive.hmshandler.retry.interval</name>
<value>2000ms</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
The time between HMSHandler retry attempts on failure.
</description>
</property>
<property>
<name>hive.hmshandler.force.reload.conf</name>
<value>false</value>
<description>
Whether to force reloading of the HMSHandler configuration (including
the connection URL) before the next metastore query that accesses the
datastore. Once reloaded, this value is reset to false. Used for
testing only.
</description>
</property>
<property>
<name>hive.metastore.server.max.message.size</name>
<value>104857600</value>
<description>Maximum message size in bytes a HMS will accept.</description>
</property>
<property>
<name>hive.metastore.server.min.threads</name>
<value>200</value>
<description>Minimum number of worker threads in the Thrift server's pool.</description>
</property>
<property>
<name>hive.metastore.server.max.threads</name>
<value>1000</value>
<description>Maximum number of worker threads in the Thrift server's pool.</description>
</property>
<property>
<name>hive.metastore.server.tcp.keepalive</name>
<value>true</value>
<description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
</property>
<property>
<name>hive.metastore.wm.default.pool.size</name>
<value>4</value>
<description>
The size of a default pool to create when creating an empty resource plan;
If not positive, no default pool will be created.
</description>
</property>
<property>
<name>hive.metastore.archive.intermediate.original</name>
<value>_INTERMEDIATE_ORIGINAL</value>
<description>
Intermediate dir suffixes used for archiving. Not important what they
are, as long as collisions are avoided
</description>
</property>
<property>
<name>hive.metastore.archive.intermediate.archived</name>
<value>_INTERMEDIATE_ARCHIVED</value>
<description/>
</property>
<property>
<name>hive.metastore.archive.intermediate.extracted</name>
<value>_INTERMEDIATE_EXTRACTED</value>
<description/>
</property>
<property>
<name>hive.metastore.kerberos.keytab.file</name>
<value/>
<description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
</property>
<property>
<name>hive.metastore.kerberos.principal</name>
<value>hive-metastore/_HOST@EXAMPLE.COM</value>
<description>
The service principal for the metastore Thrift server.
The special string _HOST will be replaced automatically with the correct host name.
</description>
</property>
<property>
<name>hive.metastore.client.kerberos.principal</name>
<value/>
<description>The Kerberos principal associated with the HA cluster of hcat_servers.</description>
</property>
<property>
<name>hive.metastore.sasl.enabled</name>
<value>false</value>
<description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
</property>
<property>
<name>hive.metastore.thrift.framed.transport.enabled</name>
<value>false</value>
<description>If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.</description>
</property>
<property>
<name>hive.metastore.thrift.compact.protocol.enabled</name>
<value>false</value>
<description>
If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.
Setting it to true will break compatibility with older clients running TBinaryProtocol.
</description>
</property>
<property>
<name>hive.metastore.token.signature</name>
<value/>
<description>The delegation token service name to match when selecting a token from the current user's tokens.</description>
</property>
<property>
<name>hive.cluster.delegation.token.store.class</name>
<value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
<description>The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
</property>
<property>
<name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
<value/>
<description>
The ZooKeeper token store connect string. You can re-use the configuration value
set in hive.zookeeper.quorum, by leaving this parameter unset.
</description>
</property>
<property>
<name>hive.cluster.delegation.token.store.zookeeper.znode</name>
<value>/hivedelegation</value>
<description>
The root path for token store data. Note that this is used by both HiveServer2 and
MetaStore to store delegation Token. One directory gets created for each of them.
The final directory names would have the servername appended to it (HIVESERVER2,
METASTORE).
</description>
</property>
<property>
<name>hive.cluster.delegation.token.store.zookeeper.acl</name>
<value/>
<description>
ACL for token store entries. Comma separated list of ACL entries. For example:
sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa
Defaults to all permissions for the hiveserver2/metastore process user.
</description>
</property>
<property>
<name>hive.metastore.cache.pinobjtypes</name>
<value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
<description>List of comma separated metastore object types that should be pinned in the cache</description>
</property>
<property>
<name>datanucleus.connectionPoolingType</name>
<value>HikariCP</value>
<description>
Expects one of [bonecp, dbcp, hikaricp, none].
Specify connection pool library for datanucleus
</description>
</property>
<property>
<name>datanucleus.connectionPool.maxPoolSize</name>
<value>10</value>
<description>
Specify the maximum number of connections in the connection pool. Note: The configured size will be used by
2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is
recommended to take into account the number of metastore instances and the number of HiveServer2 instances
configured with embedded metastore. To get optimal performance, set config to meet the following condition
(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) =
(2 * physical_core_count + hard_disk_count).
</description>
</property>
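<!--
  Illustrative worked example (assumed hardware numbers, not part of the generated defaults): for
  one standalone metastore (metastore_instances = 1), no HiveServer2 with embedded metastore,
  8 physical cores and 4 disks, the condition above becomes
  2 * pool_size * 1 + 0 = 2 * 8 + 4 = 20, i.e. pool_size = 10, which matches the default value.
-->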
<property>
<name>datanucleus.rdbms.initializeColumnInfo</name>
<value>NONE</value>
<description>initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres.</description>
</property>
<property>
<name>datanucleus.schema.validateTables</name>
<value>false</value>
<description>Validates existing schema against code. Turn this on if you want to verify the existing schema.</description>
</property>
<property>
<name>datanucleus.schema.validateColumns</name>
<value>false</value>
<description>Validates existing schema against code. Turn this on if you want to verify the existing schema.</description>
</property>
<property>
<name>datanucleus.schema.validateConstraints</name>
<value>false</value>
<description>Validates existing schema against code. Turn this on if you want to verify the existing schema.</description>
</property>
<property>
<name>datanucleus.storeManagerType</name>
<value>rdbms</value>
<description>metadata store type</description>
</property>
<property>
<name>datanucleus.schema.autoCreateAll</name>
<value>false</value>
<description>Auto-creates the necessary schema on startup if one doesn't exist. Set this to false after creating it once. To enable auto-creation, also set hive.metastore.schema.verification=false. Auto-creation is not recommended for production use cases; run the schematool command instead.</description>
</property>
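<!--
  Illustrative note (hedged; verify against your Hive version's schematool documentation): instead
  of enabling auto-creation, the schema is normally created once with something like
  "schematool -dbType mysql -initSchema" and upgraded after a Hive upgrade with
  "schematool -dbType mysql -upgradeSchema", keeping datanucleus.schema.autoCreateAll=false.
-->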
<property>
<name>hive.metastore.schema.verification</name>
<value>true</value>
<description>
Enforce metastore schema version consistency.
True: Verify that the version information stored in the metastore is compatible with the version from the Hive jars. Also disable automatic
schema migration attempts. Users are required to manually migrate the schema after a Hive upgrade, which ensures
proper metastore schema migration. (Default)
False: Warn if the version information stored in the metastore doesn't match the version from the Hive jars.
</description>
</property>
<property>
<name>hive.metastore.schema.verification.record.version</name>
<value>false</value>
<description>
When true the current MS version is recorded in the VERSION table. If this is disabled and verification is
enabled the MS will be unusable.
</description>
</property>
<property>
<name>hive.metastore.schema.info.class</name>
<value>org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo</value>
<description>
Fully qualified class name for the metastore schema information class
which is used by schematool to fetch the schema information.
This class should implement the IMetaStoreSchemaInfo interface
</description>
</property>
<property>
<name>datanucleus.transactionIsolation</name>
<value>read-committed</value>
<description>Default transaction isolation level for identity generation.</description>
</property>
<property>
<name>datanucleus.cache.level2</name>
<value>false</value>
<description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
</property>
<property>
<name>datanucleus.cache.level2.type</name>
<value>none</value>
<description/>
</property>
<property>
<name>datanucleus.identifierFactory</name>
<value>datanucleus1</value>
<description>
Name of the identifier factory to use when generating table/column names etc.
'datanucleus1' is used for backward compatibility with DataNucleus v1
</description>
</property>
<property>
<name>datanucleus.rdbms.useLegacyNativeValueStrategy</name>
<value>true</value>
<description/>
</property>
<property>
<name>datanucleus.plugin.pluginRegistryBundleCheck</name>
<value>LOG</value>
<description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
</property>
<property>
<name>hive.metastore.batch.retrieve.max</name>
<value>300</value>
<description>
Maximum number of objects (tables/partitions) that can be retrieved from the metastore in one batch.
The higher the number, the fewer round trips are needed to the Hive metastore server,
but it may also cause a higher memory requirement on the client side.
</description>
</property>
<property>
<name>hive.metastore.batch.retrieve.table.partition.max</name>
<value>1000</value>
<description>Maximum number of objects that metastore internally retrieves in one batch.</description>
</property>
<property>
<name>hive.metastore.init.hooks</name>
<value/>
<description>
A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization.
An init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener.
</description>
</property>
<property>
<name>hive.metastore.pre.event.listeners</name>
<value/>
<description>List of comma separated listeners for metastore events.</description>
</property>
<property>
<name>hive.metastore.event.listeners</name>
<value/>
<description>A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction.</description>
</property>
<property>
<name>hive.metastore.transactional.event.listeners</name>
<value/>
<description>A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction.</description>
</property>
<property>
<name>hive.notification.sequence.lock.max.retries</name>
<value>5</value>
<description>Number of retries required to acquire a lock when getting the next notification sequential ID for entries in the NOTIFICATION_LOG table.</description>
</property>
<property>
<name>hive.notification.sequence.lock.retry.sleep.interval</name>
<value>500</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
Sleep interval between retries to acquire a notification lock, as described as part of the property NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES
</description>
</property>
<property>
<name>hive.metastore.event.db.listener.timetolive</name>
<value>86400s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
time after which events will be removed from the database listener queue
</description>
</property>
<property>
<name>hive.metastore.event.db.notification.api.auth</name>
<value>true</value>
<description>
Should metastore do authorization against database notification related APIs such as get_next_notification.
If set to true, then only the superusers in proxy settings have the permission
</description>
</property>
<property>
<name>hive.metastore.authorization.storage.checks</name>
<value>false</value>
<description>
Should the metastore do authorization checks against the underlying storage (usually hdfs)
for operations like drop-partition (disallow the drop-partition if the user in
question doesn't have permissions to delete the corresponding directory
on the storage).
</description>
</property>
<property>
<name>hive.metastore.authorization.storage.check.externaltable.drop</name>
<value>true</value>
<description>
Should StorageBasedAuthorization check permission of the storage before dropping external table.
StorageBasedAuthorization already does this check for managed table. For external table however,
anyone who has read permission of the directory could drop external table, which is surprising.
The flag is set to false by default to maintain backward compatibility.
</description>
</property>
<property>
<name>hive.metastore.event.clean.freq</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Frequency at which timer task runs to purge expired events in metastore.
</description>
</property>
<property>
<name>hive.metastore.event.expiry.duration</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Duration after which events expire from events table
</description>
</property>
<property>
<name>hive.metastore.event.message.factory</name>
<value>org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory</value>
<description>Factory class for encoding and decoding messages in the generated events.</description>
</property>
<property>
<name>hive.metastore.execute.setugi</name>
<value>true</value>
<description>
In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using
the client's reported user and group permissions. Note that this property must be set on
both the client and server sides. Further note that it is best effort.
If the client sets it to true and the server sets it to false, the client setting will be ignored.
</description>
</property>
<property>
<name>hive.metastore.partition.name.whitelist.pattern</name>
<value/>
<description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
</property>
<property>
<name>hive.metastore.integral.jdo.pushdown</name>
<value>false</value>
<description>
Allow JDO query pushdown for integral partition columns in metastore. Off by default. This
improves metastore perf for integral columns, especially if there's a large number of partitions.
However, it doesn't work correctly with integral values that are not normalized (e.g. have
leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization
is also irrelevant.
</description>
</property>
<property>
<name>hive.metastore.try.direct.sql</name>
<value>true</value>
<description>
Whether the Hive metastore should try to use direct SQL queries instead of the
DataNucleus for certain read paths. This can improve metastore performance when
fetching many partitions or column statistics by orders of magnitude; however, it
is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,
the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't
work for all queries on your datastore. If all SQL queries fail (for example, your
metastore is backed by MongoDB), you might want to disable this to save the
try-and-fall-back cost.
</description>
</property>
<property>
<name>hive.metastore.direct.sql.batch.size</name>
<value>0</value>
<description>
Batch size for partition and other object retrieval from the underlying DB in direct
SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations
that necessitate this. For DBs that can handle the queries, this isn't necessary and
may impede performance. -1 means no batching, 0 means automatic batching.
</description>
</property>
<property>
<name>hive.metastore.try.direct.sql.ddl</name>
<value>true</value>
<description>
Same as hive.metastore.try.direct.sql, for read statements within a transaction that
modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL
select query has incorrect syntax or something similar inside a transaction, the
entire transaction will fail and fall-back to DataNucleus will not be possible. You
should disable the usage of direct SQL inside transactions if that happens in your case.
</description>
</property>
<property>
<name>hive.direct.sql.max.query.length</name>
<value>100</value>
<description>
The maximum size of a query string (in KB).
</description>
</property>
<property>
<name>hive.direct.sql.max.elements.in.clause</name>
<value>1000</value>
<description>
The maximum number of values in an IN clause. Once exceeded, it will be broken into
multiple OR separated IN clauses.
</description>
</property>
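<!--
  Illustrative note (not part of the generated defaults): with the default limit of 1000, a direct
  SQL predicate carrying 2500 partition values would be rewritten into roughly three OR-separated
  IN clauses of at most 1000 values each, e.g.
  col IN (v1 .. v1000) OR col IN (v1001 .. v2000) OR col IN (v2001 .. v2500).
-->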
<property>
<name>hive.direct.sql.max.elements.values.clause</name>
<value>1000</value>
<description>The maximum number of values in a VALUES clause for an INSERT statement.</description>
</property>
<property>
<name>hive.metastore.orm.retrieveMapNullsAsEmptyStrings</name>
<value>false</value>
<description>Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, pruning is the correct behaviour</description>
</property>
<property>
<name>hive.metastore.disallow.incompatible.col.type.changes</name>
<value>true</value>
<description>
If true (the default), ALTER TABLE operations which change the type of a
column (say STRING) to an incompatible type (say MAP) are disallowed.
RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the
datatypes can be converted from string to any type. The map is also serialized as
a string, which can be read as a string as well. However, with any binary
serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions
when subsequently trying to access old partitions.
Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are
not blocked.
See HIVE-4409 for more details.
</description>
</property>
<property>
<name>hive.metastore.limit.partition.request</name>
<value>-1</value>
<description>
This limits the number of partitions that can be requested from the metastore for a given table.
The default value "-1" means no limit.
</description>
</property>
<property>
<name>hive.table.parameters.default</name>
<value/>
<description>Default property values for newly created tables</description>
</property>
<property>
<name>hive.ddl.createtablelike.properties.whitelist</name>
<value/>
<description>Table Properties to copy over when executing a Create Table Like.</description>
</property>
<property>
<name>hive.metastore.rawstore.impl</name>
<value>org.apache.hadoop.hive.metastore.ObjectStore</value>
<description>
Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface.
This class is used for the storage and retrieval of raw metadata objects such as tables and databases.
</description>
</property>
<property>
<name>hive.metastore.txn.store.impl</name>
<value>org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler</value>
<description>Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This class is used to store and retrieve transactions and locks</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>org.apache.derby.jdbc.EmbeddedDriver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.PersistenceManagerFactoryClass</name>
<value>org.datanucleus.api.jdo.JDOPersistenceManagerFactory</value>
<description>Class implementing the JDO persistence layer</description>
</property>
<property>
<name>hive.metastore.expression.proxy</name>
<value>org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore</value>
<description/>
</property>
<property>
<name>javax.jdo.option.DetachAllOnCommit</name>
<value>true</value>
<description>Detaches all objects from session so that they can be used after transaction is committed</description>
</property>
<property>
<name>javax.jdo.option.NonTransactionalRead</name>
<value>true</value>
<description>Reads outside of transactions</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>Username to use against metastore database</description>
</property>
<property>
<name>hive.metastore.end.function.listeners</name>
<value/>
<description>List of comma separated listeners for the end of metastore functions.</description>
</property>
<property>
<name>hive.metastore.partition.inherit.table.properties</name>
<value/>
<description>
List of comma separated keys occurring in table properties which will get inherited to newly created partitions.
* implies all the keys will get inherited.
</description>
</property>
<property>
<name>hive.metastore.filter.hook</name>
<value>org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl</value>
<description>Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager is set to an instance of HiveAuthorizerFactory, then this value is ignored.</description>
</property>
<property>
<name>hive.metastore.dml.events</name>
<value>false</value>
<description>If true, the metastore will be asked to fire events for DML operations</description>
</property>
<property>
<name>hive.metastore.client.drop.partitions.using.expressions</name>
<value>true</value>
<description>Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, or drops partitions iteratively</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.enabled</name>
<value>true</value>
<description>Whether aggregate stats caching is enabled or not.</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.size</name>
<value>10000</value>
<description>Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache.</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.max.partitions</name>
<value>10000</value>
<description>Maximum number of partitions that are aggregated per cache node.</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.fpp</name>
<value>0.01</value>
<description>Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%).</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.max.variance</name>
<value>0.01</value>
<description>Maximum tolerable variance in number of partitions between a cached node and our request (default 1%).</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.ttl</name>
<value>600s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Number of seconds for a cached node to be active in the cache before it becomes stale.
</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.max.writer.wait</name>
<value>5000ms</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
Number of milliseconds a writer will wait to acquire the writelock before giving up.
</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.max.reader.wait</name>
<value>1000ms</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
Number of milliseconds a reader will wait to acquire the readlock before giving up.
</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.max.full</name>
<value>0.9</value>
<description>Maximum cache full % after which the cache cleaner thread kicks in.</description>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.clean.until</name>
<value>0.8</value>
<description>The cleaner thread cleans until cache reaches this % full size.</description>
</property>
<property>
<name>hive.metastore.metrics.enabled</name>
<value>false</value>
<description>Enable metrics on the metastore.</description>
</property>
<property>
<name>hive.metastore.initial.metadata.count.enabled</name>
<value>true</value>
<description>Enable a metadata count at metastore startup for metrics.</description>
</property>
<property>
<name>hive.metastore.use.SSL</name>
<value>false</value>
<description>Set this to true for using SSL encryption in HMS server.</description>
</property>
<property>
<name>hive.metastore.keystore.path</name>
<value/>
<description>Metastore SSL certificate keystore location.</description>
</property>
<property>
<name>hive.metastore.keystore.password</name>
<value/>
<description>Metastore SSL certificate keystore password.</description>
</property>
<property>
<name>hive.metastore.truststore.path</name>
<value/>
<description>Metastore SSL certificate truststore location.</description>
</property>
<property>
<name>hive.metastore.truststore.password</name>
<value/>
<description>Metastore SSL certificate truststore password.</description>
</property>
<property>
<name>hive.metadata.export.location</name>
<value/>
<description>
When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener,
it is the location to which the metadata will be exported. The default is an empty string, which results in the
metadata being exported to the current user's home directory on HDFS.
</description>
</property>
<property>
<name>hive.metadata.move.exported.metadata.to.trash</name>
<value>true</value>
<description>
When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener,
this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory
alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data.
</description>
</property>
<property>
<name>hive.cli.errors.ignore</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.cli.print.current.db</name>
<value>false</value>
<description>Whether to include the current database in the Hive prompt.</description>
</property>
<property>
<name>hive.cli.prompt</name>
<value>hive</value>
<description>
Command line prompt configuration value. Other hiveconf can be used in this configuration value.
Variable substitution will only be invoked at the Hive CLI startup.
</description>
</property>
<property>
<name>hive.cli.pretty.output.num.cols</name>
<value>-1</value>
<description>
The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.
If the value of this property is -1, then Hive will use the auto-detected terminal width.
</description>
</property>
<property>
<name>hive.metastore.fs.handler.class</name>
<value>org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl</value>
<description/>
</property>
<property>
<name>hive.session.id</name>
<value/>
<description/>
</property>
<property>
<name>hive.session.silent</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.local.time.zone</name>
<value>LOCAL</value>
<description>
Sets the time-zone for displaying and interpreting time stamps. If this property value is set to
LOCAL, it is not specified, or it is not a correct time-zone, the system default time-zone will be
used instead. Time-zone IDs can be specified as region-based zone IDs (based on IANA time-zone data),
abbreviated zone IDs, or offset IDs.
</description>
</property>
<property>
<name>hive.session.history.enabled</name>
<value>false</value>
<description>Whether to log Hive query, query plan, runtime statistics etc.</description>
</property>
<property>
<name>hive.query.string</name>
<value/>
<description>Query being executed (there might be multiple per session)</description>
</property>
<property>
<name>hive.query.id</name>
<value/>
<description>ID for the query being executed (there might be multiple per session)</description>
</property>
<property>
<name>hive.jobname.length</name>
<value>50</value>
<description>max jobname length</description>
</property>
<property>
<name>hive.jar.path</name>
<value/>
<description>The location of hive_cli.jar that is used when submitting jobs in a separate jvm.</description>
</property>
<property>
<name>hive.aux.jars.path</name>
<value/>
<description>The location of the plugin jars that contain implementations of user defined functions and serdes.</description>
</property>
<property>
<name>hive.reloadable.aux.jars.path</name>
<value/>
<description>
The locations of the plugin jars, which can be comma-separated folders or jars. Jars can be renewed
by executing the reload command. These jars can be used for auxiliary classes such as UDFs or SerDes.
</description>
</property>
<property>
<name>hive.added.files.path</name>
<value/>
<description>This is an internal parameter.</description>
</property>
<property>
<name>hive.added.jars.path</name>
<value/>
<description>This is an internal parameter.</description>
</property>
<property>
<name>hive.added.archives.path</name>
<value/>
<description>This is an internal parameter.</description>
</property>
<property>
<name>hive.resource.use.hdfs.location</name>
<value>true</value>
<description>Reference HDFS based files/jars directly instead of copy to session based HDFS scratch directory, to make distributed cache more useful.</description>
</property>
<property>
<name>hive.auto.progress.timeout</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
How long to run autoprogressor for the script/UDTF operators.
Set to 0 for forever.
</description>
</property>
<property>
<name>hive.script.auto.progress</name>
<value>false</value>
<description>
Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker
to avoid the task getting killed because of inactivity. Hive sends progress information when the script is
outputting to stderr. This option removes the need to periodically produce stderr messages,
but users should be cautious because this may prevent infinite loops in the scripts from being killed by TaskTracker.
</description>
</property>
<property>
<name>hive.script.operator.id.env.var</name>
<value>HIVE_SCRIPT_OPERATOR_ID</value>
<description>
Name of the environment variable that holds the unique script operator ID in the user's
transform function (the custom mapper/reducer that the user has specified in the query)
</description>
</property>
<property>
<name>hive.script.operator.truncate.env</name>
<value>false</value>
<description>Truncate each environment variable for external scripts in the script operator to 20KB (to fit system limits)</description>
</property>
<property>
<name>hive.script.operator.env.blacklist</name>
<value>hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist</value>
<description>Comma separated list of keys from the configuration file not to convert to environment variables when invoking the script operator</description>
</property>
<property>
<name>hive.strict.checks.orderby.no.limit</name>
<value>false</value>
<description>
Enabling strict large query checks disallows the following:
Orderby without limit.
Note that this check currently does not consider data size, only the query pattern.
</description>
</property>
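<!--
  Illustrative note (not part of the generated defaults): when this check is enabled, a query such
  as "SELECT * FROM t ORDER BY c" is rejected because a global sort without a LIMIT forces all
  rows through a single reducer; adding a LIMIT clause (e.g. "... ORDER BY c LIMIT 100") satisfies
  the check.
-->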
<property>
<name>hive.strict.checks.no.partition.filter</name>
<value>false</value>
<description>
Enabling strict large query checks disallows the following:
No partition being picked up for a query against partitioned table.
Note that this check currently does not consider data size, only the query pattern.
</description>
</property>
<property>
<name>hive.strict.checks.type.safety</name>
<value>true</value>
<description>
Enabling strict type safety checks disallows the following:
Comparing bigints and strings.
Comparing bigints and doubles.
</description>
</property>
<property>
<name>hive.strict.checks.cartesian.product</name>
<value>false</value>
<description>
Enabling strict Cartesian join checks disallows the following:
Cartesian product (cross join).
</description>
</property>
<property>
<name>hive.strict.checks.bucketing</name>
<value>true</value>
<description>
Enabling strict bucketing checks disallows the following:
Load into bucketed tables.
</description>
</property>
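<!-- Illustrative usage note (not part of the generated template): the hive.strict.checks.* flags above can be
     toggled per session. A minimal sketch, assuming a hypothetical partitioned table sales_part:
       SET hive.strict.checks.orderby.no.limit=true;
       SET hive.strict.checks.no.partition.filter=true;
       SET hive.strict.checks.cartesian.product=true;
       SELECT * FROM sales_part WHERE ds='2018-01-01' ORDER BY amount LIMIT 100;
     With these settings, an ORDER BY without LIMIT, or a query on sales_part with no partition filter,
     would be rejected at compile time. -->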
<property>
<name>hive.load.data.owner</name>
<value/>
<description>Set the owner of files loaded using load data in managed tables.</description>
</property>
<property>
<name>hive.mapred.mode</name>
<value/>
<description>Deprecated; use hive.strict.checks.* settings instead.</description>
</property>
<property>
<name>hive.alias</name>
<value/>
<description/>
</property>
<property>
<name>hive.map.aggr</name>
<value>true</value>
<description>Whether to use map-side aggregation in Hive Group By queries</description>
</property>
<property>
<name>hive.groupby.skewindata</name>
<value>false</value>
<description>Whether there is skew in data to optimize group by queries</description>
</property>
<property>
<name>hive.join.emit.interval</name>
<value>1000</value>
<description>How many rows in the right-most join operand Hive should buffer before emitting the join result.</description>
</property>
<property>
<name>hive.join.cache.size</name>
<value>25000</value>
<description>How many rows in the joining tables (except the streaming table) should be cached in memory.</description>
</property>
<property>
<name>hive.join.inner.residual</name>
<value>false</value>
<description>Whether to push non-equi filter predicates within inner joins. This can improve efficiency in the evaluation of certain joins, since we will not be emitting rows which are thrown away by a Filter operator straight away. However, currently vectorization does not support them, thus enabling it is only recommended when vectorization is disabled.</description>
</property>
<property>
<name>hive.cbo.enable</name>
<value>true</value>
<description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
</property>
<property>
<name>hive.cbo.cnf.maxnodes</name>
<value>-1</value>
<description>When converting to conjunctive normal form (CNF), fail if the expression exceeds this threshold; the threshold is expressed in terms of the number of nodes (leaves and interior nodes). -1 to not set up a threshold.</description>
</property>
<property>
<name>hive.cbo.returnpath.hiveop</name>
<value>false</value>
<description>Flag to control calcite plan to hive operator conversion</description>
</property>
<property>
<name>hive.cbo.costmodel.extended</name>
<value>false</value>
<description>Flag to control enabling the extended cost model based on CPU, IO and cardinality. Otherwise, the cost model is based on cardinality.</description>
</property>
<property>
<name>hive.cbo.costmodel.cpu</name>
<value>0.000001</value>
<description>Default cost of a comparison</description>
</property>
<property>
<name>hive.cbo.costmodel.network</name>
<value>150.0</value>
<description>Default cost of transferring a byte over the network; expressed as a multiple of CPU cost</description>
</property>
<property>
<name>hive.cbo.costmodel.local.fs.write</name>
<value>4.0</value>
<description>Default cost of writing a byte to local FS; expressed as multiple of NETWORK cost</description>
</property>
<property>
<name>hive.cbo.costmodel.local.fs.read</name>
<value>4.0</value>
<description>Default cost of reading a byte from local FS; expressed as multiple of NETWORK cost</description>
</property>
<property>
<name>hive.cbo.costmodel.hdfs.write</name>
<value>10.0</value>
<description>Default cost of writing a byte to HDFS; expressed as multiple of Local FS write cost</description>
</property>
<property>
<name>hive.cbo.costmodel.hdfs.read</name>
<value>1.5</value>
<description>Default cost of reading a byte from HDFS; expressed as multiple of Local FS read cost</description>
</property>
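<!-- Worked example (illustrative interpretation, assuming the multipliers above compose): the modeled cost of
     writing one byte to HDFS under the extended cost model (hive.cbo.costmodel.extended=true) would be
       hdfs.write x local.fs.write x network x cpu = 10.0 x 4.0 x 150.0 x 0.000001 = 0.006
     in the model's abstract cost units, since each factor is expressed as a multiple of the one below it. -->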
<property>
<name>hive.cbo.show.warnings</name>
<value>true</value>
<description>Toggle display of CBO warnings like missing column stats</description>
</property>
<property>
<name>hive.transpose.aggr.join</name>
<value>false</value>
<description>push aggregates through join</description>
</property>
<property>
<name>hive.optimize.semijoin.conversion</name>
<value>true</value>
<description>convert group by followed by inner equi join into semijoin</description>
</property>
<property>
<name>hive.order.columnalignment</name>
<value>true</value>
<description>Flag to control whether we want to try to align columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages</description>
</property>
<property>
<name>hive.materializedview.rewriting</name>
<value>true</value>
<description>Whether to try to rewrite queries using the materialized views enabled for rewriting</description>
</property>
<property>
<name>hive.materializedview.rewriting.strategy</name>
<value>heuristic</value>
<description>
Expects one of [heuristic, costbased].
The strategy that should be used to cost and select the materialized view rewriting.
heuristic: Always try to select the plan using the materialized view if rewriting produced one, choosing the plan with the lower cost among possible plans containing a materialized view
costbased: Fully cost-based strategy; always use the plan with the lower cost, independently of whether it uses a materialized view or not
</description>
</property>
<property>
<name>hive.materializedview.rewriting.time.window</name>
<value>0min</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is min if not specified.
Time window, specified in seconds, after which outdated materialized views become invalid for automatic query rewriting.
For instance, if more time than the value assigned to the property has passed since the materialized view was created or rebuilt, and one of its source tables has changed since, the materialized view will not be considered for rewriting. Default value 0 means that the materialized view cannot be outdated to be used automatically in query rewriting. Value -1 means to skip this check.
</description>
</property>
<property>
<name>hive.materializedview.rewriting.incremental</name>
<value>false</value>
<description>
Whether to try to execute incremental rewritings based on outdated materializations and
the current content of tables. Setting this to true effectively amounts to enabling incremental
rebuild for the materializations too.
</description>
</property>
<property>
<name>hive.materializedview.rebuild.incremental</name>
<value>true</value>
<description>
Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild
tries to modify the original materialization contents to reflect the latest changes to the
materialized view source tables, instead of rebuilding the contents fully. Incremental rebuild
is based on the materialized view algebraic incremental rewriting.
</description>
</property>
<property>
<name>hive.materializedview.fileformat</name>
<value>ORC</value>
<description>
Expects one of [none, textfile, sequencefile, rcfile, orc].
Default file format for CREATE MATERIALIZED VIEW statement
</description>
</property>
<property>
<name>hive.materializedview.serde</name>
<value>org.apache.hadoop.hive.ql.io.orc.OrcSerde</value>
<description>Default SerDe used for materialized views</description>
</property>
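<!-- Illustrative example (hypothetical table names): with the defaults above, a materialized view created as
       CREATE MATERIALIZED VIEW mv_daily_sales AS
       SELECT ds, SUM(amount) AS total FROM sales GROUP BY ds;
     is stored as ORC using OrcSerde, and, because hive.materializedview.rewriting defaults to true, the
     optimizer may rewrite a matching aggregate query over sales to read from mv_daily_sales instead. -->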
<property>
<name>hive.mapjoin.bucket.cache.size</name>
<value>100</value>
<description/>
</property>
<property>
<name>hive.mapjoin.optimized.hashtable</name>
<value>true</value>
<description>
Whether Hive should use memory-optimized hash table for MapJoin.
Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized.
</description>
</property>
<property>
<name>hive.mapjoin.optimized.hashtable.probe.percent</name>
<value>0.5</value>
<description>Probing space percentage of the optimized hashtable</description>
</property>
<property>
<name>hive.mapjoin.hybridgrace.hashtable</name>
<value>true</value>
<description>Whether to use hybrid grace hash join as the join method for mapjoin. Tez only.</description>
</property>
<property>
<name>hive.mapjoin.hybridgrace.memcheckfrequency</name>
<value>1024</value>
<description>For hybrid grace hash join, how often (how many rows apart) we check if memory is full. This number should be a power of 2.</description>
</property>
<property>
<name>hive.mapjoin.hybridgrace.minwbsize</name>
<value>524288</value>
<description>For hybrid grace hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB.</description>
</property>
<property>
<name>hive.mapjoin.hybridgrace.minnumpartitions</name>
<value>16</value>
<description>For hybrid grace hash join, the minimum number of partitions to create.</description>
</property>
<property>
<name>hive.mapjoin.optimized.hashtable.wbsize</name>
<value>8388608</value>
<description>
Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to
store data. This is one buffer size. HT may be slightly faster if this is larger, but for small
joins unnecessary memory will be allocated and then trimmed.
</description>
</property>
<property>
<name>hive.mapjoin.hybridgrace.bloomfilter</name>
<value>true</value>
<description>Whether to use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling.</description>
</property>
<property>
<name>hive.smbjoin.cache.rows</name>
<value>10000</value>
<description>How many rows with the same key value should be cached in memory per smb joined table.</description>
</property>
<property>
<name>hive.groupby.mapaggr.checkinterval</name>
<value>100000</value>
<description>Number of rows after which the size check of the grouping keys/aggregation classes is performed</description>
</property>
<property>
<name>hive.map.aggr.hash.percentmemory</name>
<value>0.5</value>
<description>Portion of total memory to be used by map-side group aggregation hash table</description>
</property>
<property>
<name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name>
<value>0.3</value>
<description>Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join</description>
</property>
<property>
<name>hive.map.aggr.hash.force.flush.memory.threshold</name>
<value>0.9</value>
<description>
The max memory to be used by map-side group aggregation hash table.
If the memory usage is higher than this number, force to flush data
</description>
</property>
<property>
<name>hive.map.aggr.hash.min.reduction</name>
<value>0.5</value>
<description>
Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number.
Set to 1 to make sure hash aggregation is never turned off.
</description>
</property>
<property>
<name>hive.multigroupby.singlereducer</name>
<value>true</value>
<description>
Whether to optimize a multi group by query to generate a single M/R job plan. If the multi group by query has
common group by keys, it will be optimized to generate a single M/R job.
</description>
</property>
<property>
<name>hive.map.groupby.sorted</name>
<value>true</value>
<description>
If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform
the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this
is that it limits the number of mappers to the number of files.
</description>
</property>
<property>
<name>hive.groupby.position.alias</name>
<value>false</value>
<description>Whether to enable using Column Position Alias in Group By</description>
</property>
<property>
<name>hive.orderby.position.alias</name>
<value>true</value>
<description>Whether to enable using Column Position Alias in Order By</description>
</property>
<property>
<name>hive.groupby.orderby.position.alias</name>
<value>false</value>
<description>
Whether to enable using Column Position Alias in Group By or Order By (deprecated).
Use hive.orderby.position.alias or hive.groupby.position.alias instead
</description>
</property>
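<!-- Illustrative example: with hive.groupby.position.alias=true and hive.orderby.position.alias=true,
     positional references resolve to select-list columns (hypothetical table t):
       SET hive.groupby.position.alias=true;
       SELECT category, COUNT(*) FROM t GROUP BY 1 ORDER BY 2 DESC;
     Here GROUP BY 1 refers to category and ORDER BY 2 refers to the count. -->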
<property>
<name>hive.new.job.grouping.set.cardinality</name>
<value>30</value>
<description>
Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.
For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;
4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).
This can lead to explosion across map-reduce boundary if the cardinality of T is very high,
and map-side aggregation does not do a very good job.
This parameter decides if Hive should add an additional map-reduce job. If the grouping set
cardinality (4 in the example above), is more than this value, a new MR job is added under the
assumption that the original group by will reduce the data size.
</description>
</property>
<property>
<name>hive.groupby.limit.extrastep</name>
<value>true</value>
<description>
This parameter decides if Hive should
create a new MR job for sorting the final output
</description>
</property>
<property>
<name>hive.exec.copyfile.maxnumfiles</name>
<value>1</value>
<description>Maximum number of files Hive uses to do sequential HDFS copies between directories. Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster.</description>
</property>
<property>
<name>hive.exec.copyfile.maxsize</name>
<value>33554432</value>
<description>Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster.</description>
</property>
<property>
<name>hive.udtf.auto.progress</name>
<value>false</value>
<description>
Whether Hive should automatically send progress information to the TaskTracker
when using UDTFs to prevent the task from getting killed because of inactivity. Users should be cautious
because this may prevent the TaskTracker from killing tasks with infinite loops.
</description>
</property>
<property>
<name>hive.default.fileformat</name>
<value>TextFile</value>
<description>
Expects one of [textfile, sequencefile, rcfile, orc, parquet].
Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]
</description>
</property>
<property>
<name>hive.default.fileformat.managed</name>
<value>none</value>
<description>
Expects one of [none, textfile, sequencefile, rcfile, orc, parquet].
Default file format for CREATE TABLE statement applied to managed tables only. External tables will be
created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat
for all tables.
</description>
</property>
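<!-- Illustrative example (hypothetical table names): the default formats above apply only when STORED AS is
     omitted. With hive.default.fileformat=TextFile:
       CREATE TABLE t_text (id INT, name STRING);
       CREATE TABLE t_orc (id INT, name STRING) STORED AS ORC;
     t_text is created as a text table, while the explicit STORED AS ORC overrides the default for t_orc.
     Setting hive.default.fileformat.managed (e.g. to orc) would change the default for managed tables only. -->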
<property>
<name>hive.query.result.fileformat</name>
<value>SequenceFile</value>
<description>
Expects one of [textfile, sequencefile, rcfile, llap].
Default file format for storing result of the query.
</description>
</property>
<property>
<name>hive.fileformat.check</name>
<value>true</value>
<description>Whether to check file format or not when loading data files</description>
</property>
<property>
<name>hive.default.rcfile.serde</name>
<value>org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe</value>
<description>The default SerDe Hive will use for the RCFile format</description>
</property>
<property>
<name>hive.default.serde</name>
<value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
<description>The default SerDe Hive will use for storage formats that do not specify a SerDe.</description>
</property>
<property>
<name>hive.serdes.using.metastore.for.schema</name>
<value>org.apache.hadoop.hive.ql.io.orc.OrcSerde,org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe</value>
<description>SerDes retrieving schema from metastore. This is an internal parameter.</description>
</property>
<property>
<name>hive.legacy.schema.for.all.serdes</name>
<value>false</value>
<description>
A backward compatibility setting for external metastore users that do not handle
hive.serdes.using.metastore.for.schema correctly. This may be removed at any time.
</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>${system:java.io.tmpdir}/${system:user.name}</value>
<description>Location of Hive run time structured log file</description>
</property>
<property>
<name>hive.querylog.enable.plan.progress</name>
<value>true</value>
<description>
Whether to log the plan's progress every time a job's progress is checked.
These logs are written to the location specified by hive.querylog.location
</description>
</property>
<property>
<name>hive.querylog.plan.progress.interval</name>
<value>60000ms</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
The interval to wait between logging the plan's progress.
If there is a whole number percentage change in the progress of the mappers or the reducers,
the progress is logged regardless of this value.
The actual interval will be the ceiling of (this value divided by the value of
hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval
I.e. if it does not divide evenly by the value of hive.exec.counters.pull.interval, it will be
logged less frequently than specified.
This only has an effect if hive.querylog.enable.plan.progress is set to true.
</description>
</property>
<property>
<name>hive.script.serde</name>
<value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
<description>The default SerDe for transmitting input data to and reading output data from the user scripts. </description>
</property>
<property>
<name>hive.script.recordreader</name>
<value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
<description>The default record reader for reading data from the user scripts. </description>
</property>
<property>
<name>hive.script.recordwriter</name>
<value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
<description>The default record writer for writing data to the user scripts. </description>
</property>
<property>
<name>hive.transform.escape.input</name>
<value>false</value>
<description>
This adds an option to escape special chars (newlines, carriage returns and
tabs) when they are passed to the user script. This is useful if the Hive tables
can contain data that contains special characters.
</description>
</property>
<property>
<name>hive.binary.record.max.length</name>
<value>1000</value>
<description>
Read from a binary stream and treat each hive.binary.record.max.length bytes as a record.
The last record before the end of stream can have less than hive.binary.record.max.length bytes
</description>
</property>
<property>
<name>hive.mapred.local.mem</name>
<value>0</value>
<description>mapper/reducer memory in local mode</description>
</property>
<property>
<name>hive.mapjoin.smalltable.filesize</name>
<value>25000000</value>
<description>
The threshold for the input file size of the small tables; if the file size is smaller
than this threshold, Hive will try to convert the common join into a map join
</description>
</property>
<property>
<name>hive.exec.schema.evolution</name>
<value>true</value>
<description>Use schema evolution to convert self-describing file format's data to the schema desired by the reader.</description>
</property>
<property>
<name>hive.transactional.events.mem</name>
<value>10000000</value>
<description>
Vectorized ACID readers can often load all the delete events from all the delete deltas
into memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic
that limits the total number of delete events that can be loaded into memory at once.
Roughly it has been set to 10 million delete events per bucket (~160 MB).
</description>
</property>
<property>
<name>hive.sample.seednumber</name>
<value>0</value>
<description>A number used for percentage sampling. By changing this number, the user will change the subsets of data sampled.</description>
</property>
<property>
<name>hive.test.mode</name>
<value>false</value>
<description>Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.</description>
</property>
<property>
<name>hive.exim.test.mode</name>
<value>false</value>
<description>The subset of test mode that only enables custom path handling for ExIm.</description>
</property>
<property>
<name>hive.test.mode.prefix</name>
<value>test_</value>
<description>In test mode, specifies prefixes for the output table</description>
</property>
<property>
<name>hive.test.mode.samplefreq</name>
<value>32</value>
<description>
In test mode, specifies the sampling frequency for tables that are not bucketed.
For example, the following query:
INSERT OVERWRITE TABLE dest SELECT col1 from src
would be converted to
INSERT OVERWRITE TABLE test_dest
SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))
</description>
</property>
<property>
<name>hive.test.mode.nosamplelist</name>
<value/>
<description>In test mode, specifies comma separated table names which would not apply sampling</description>
</property>
<property>
<name>hive.test.dummystats.aggregator</name>
<value/>
<description>internal variable for test</description>
</property>
<property>
<name>hive.test.dummystats.publisher</name>
<value/>
<description>internal variable for test</description>
</property>
<property>
<name>hive.test.currenttimestamp</name>
<value/>
<description>current timestamp for test</description>
</property>
<property>
<name>hive.test.rollbacktxn</name>
<value>false</value>
<description>For testing only. Will mark every ACID transaction aborted</description>
</property>
<property>
<name>hive.test.fail.compaction</name>
<value>false</value>
<description>For testing only. Will cause CompactorMR to fail.</description>
</property>
<property>
<name>hive.test.fail.heartbeater</name>
<value>false</value>
<description>For testing only. Will cause Heartbeater to fail.</description>
</property>
<property>
<name>hive.test.bucketcodec.version</name>
<value>1</value>
<description>
For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified
format
</description>
</property>
<property>
<name>hive.merge.mapfiles</name>
<value>true</value>
<description>Merge small files at the end of a map-only job</description>
</property>
<property>
<name>hive.merge.mapredfiles</name>
<value>false</value>
<description>Merge small files at the end of a map-reduce job</description>
</property>
<property>
<name>hive.merge.tezfiles</name>
<value>false</value>
<description>Merge small files at the end of a Tez DAG</description>
</property>
<property>
<name>hive.merge.sparkfiles</name>
<value>false</value>
<description>Merge small files at the end of a Spark DAG Transformation</description>
</property>
<property>
<name>hive.merge.size.per.task</name>
<value>256000000</value>
<description>Size of merged files at the end of the job</description>
</property>
<property>
<name>hive.merge.smallfiles.avgsize</name>
<value>16000000</value>
<description>
When the average output file size of a job is less than this number, Hive will start an additional
map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
</description>
</property>
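<!-- Illustrative example: small-file merging combines the flags above. For instance,
       SET hive.merge.mapredfiles=true;
       SET hive.merge.smallfiles.avgsize=16000000;
       SET hive.merge.size.per.task=256000000;
     If a map-reduce job's average output file size falls below roughly 16 MB, Hive launches an additional
     merge job that targets output files of roughly 256 MB. -->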
<property>
<name>hive.merge.rcfile.block.level</name>
<value>true</value>
<description/>
</property>
<property>
<name>hive.merge.orcfile.stripe.level</name>
<value>true</value>
<description>
When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled
while writing a table with ORC file format, enabling this config will do stripe-level
fast merge for small ORC files. Note that enabling this config will not honor the
padding tolerance config (hive.exec.orc.block.padding.tolerance).
</description>
</property>
<property>
<name>hive.use.orc.codec.pool</name>
<value>false</value>
<description>Whether to use codec pool in ORC. Disable if there are bugs with codec reuse.</description>
</property>
<property>
<name>hive.exec.rcfile.use.explicit.header</name>
<value>true</value>
<description>
If this is set, the header for RCFiles will simply be RCF. If this is not
set, the header will be the one borrowed from sequence files, e.g. SEQ- followed
by the input and output RCFile formats.
</description>
</property>
<property>
<name>hive.exec.rcfile.use.sync.cache</name>
<value>true</value>
<description/>
</property>
<property>
<name>hive.io.rcfile.record.interval</name>
<value>2147483647</value>
<description/>
</property>
<property>
<name>hive.io.rcfile.column.number.conf</name>
<value>0</value>
<description/>
</property>
<property>
<name>hive.io.rcfile.tolerate.corruptions</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.io.rcfile.record.buffer.size</name>
<value>4194304</value>
<description/>
</property>
<property>
<name>parquet.memory.pool.ratio</name>
<value>0.5</value>
<description>
Maximum fraction of heap that can be used by Parquet file writers in one task.
It is for avoiding OutOfMemory errors in tasks. Works with Parquet 1.6.0 and above.
This config parameter is defined in Parquet, which is why it does not start with 'hive.'.
</description>
</property>
<property>
<name>hive.parquet.timestamp.skip.conversion</name>
<value>false</value>
<description>The current Hive implementation of Parquet stores timestamps in UTC; this flag allows skipping the conversion when reading Parquet files written by other tools</description>
</property>
<property>
<name>hive.avro.timestamp.skip.conversion</name>
<value>false</value>
<description>Some older Hive implementations (pre-3.1) wrote Avro timestamps in a UTC-normalized manner, while from version 3.1 onwards Hive writes time zone agnostic timestamps. Setting this flag to true will treat legacy timestamps as time zone agnostic. Setting it to false will treat legacy timestamps as UTC-normalized. This flag will not affect timestamps written after this change.</description>
</property>
<property>
<name>hive.int.timestamp.conversion.in.seconds</name>
<value>false</value>
<description>
Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.
Set this flag to true to interpret the value as seconds to be consistent with float/double.
</description>
</property>
<property>
<name>hive.exec.orc.base.delta.ratio</name>
<value>8</value>
<description>
The ratio of base writer and
delta writer in terms of STRIPE_SIZE and BUFFER_SIZE.
</description>
</property>
<property>
<name>hive.exec.orc.delta.streaming.optimizations.enabled</name>
<value>false</value>
<description>
Whether to enable streaming optimizations for ORC delta files. This will disable ORC's internal indexes,
disable compression, enable fast encoding and disable dictionary encoding.
</description>
</property>
<property>
<name>hive.exec.orc.split.strategy</name>
<value>HYBRID</value>
<description>
Expects one of [hybrid, bi, etl].
This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation as opposed to query execution (split generation does not read or cache file footers). ETL strategy is used when spending little more time in split generation is acceptable (split generation reads and caches file footers). HYBRID chooses between the above strategies based on heuristics.
</description>
</property>
<property>
<name>hive.streaming.auto.flush.enabled</name>
<value>true</value>
<description>
Whether to enable memory
monitoring and automatic flushing of open record updaters during streaming ingest. This is an expert level
setting and disabling this may have severe performance impact under memory pressure.
</description>
</property>
<property>
<name>hive.heap.memory.monitor.usage.threshold</name>
<value>0.7</value>
<description>
Hive streaming does automatic memory management across all open record writers. This threshold will let the
memory monitor take an action (flush open files) when heap memory usage exceeds this threshold.
</description>
</property>
<property>
<name>hive.streaming.auto.flush.check.interval.size</name>
<value>100Mb</value>
<description>
Expects a byte size value with unit (blank for bytes, kb, mb, gb, tb, pb).
Hive streaming ingest has an auto flush mechanism to flush all open record updaters under memory pressure.
When memory usage exceeds hive.heap.memory.monitor.usage.threshold, the auto-flush mechanism will
wait until this size (default 100Mb) of records is ingested before triggering a flush.
</description>
</property>
<property>
<name>hive.classloader.shade.prefix</name>
<value/>
<description>
During reflective instantiation of a class
(input, output formats, serde etc.), when the classloader throws a ClassNotFoundException, as a fallback this
shade prefix will be prepended to the class reference and the instantiation retried.
</description>
</property>
<property>
<name>hive.orc.splits.ms.footer.cache.enabled</name>
<value>false</value>
<description>Whether to enable using file metadata cache in metastore for ORC file footers.</description>
</property>
<property>
<name>hive.orc.splits.ms.footer.cache.ppd.enabled</name>
<value>true</value>
<description>
Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled
must also be set to true for this to work).
</description>
</property>
<property>
<name>hive.orc.splits.include.file.footer</name>
<value>false</value>
<description>
If turned on splits generated by orc will include metadata about the stripes in the file. This
data is read remotely (from the client or HS2 machine) and sent to all the tasks.
</description>
</property>
<property>
<name>hive.orc.splits.directory.batch.ms</name>
<value>0</value>
<description>
How long, in ms, to wait to batch input directories for processing during ORC split
generation. 0 means process directories individually. This can increase the number of
metastore calls if metastore metadata cache is used.
</description>
</property>
<property>
<name>hive.orc.splits.include.fileid</name>
<value>true</value>
<description>Include file ID in splits on file systems that support it.</description>
</property>
<property>
<name>hive.orc.splits.allow.synthetic.fileid</name>
<value>true</value>
<description>Allow synthetic file ID in splits on file systems that don't have a native one.</description>
</property>
<property>
<name>hive.orc.cache.stripe.details.mem.size</name>
<value>256Mb</value>
<description>
Expects a byte size value with unit (blank for bytes, kb, mb, gb, tb, pb).
Maximum size of orc splits cached in the client.
</description>
</property>
<property>
<name>hive.orc.compute.splits.num.threads</name>
<value>10</value>
<description>How many threads orc should use to create splits in parallel.</description>
</property>
<property>
<name>hive.orc.cache.use.soft.references</name>
<value>false</value>
<description>
By default, the cache that the ORC input format uses to store ORC file footers uses hard
references for the cached objects. Setting this to true can help avoid out of memory
issues under memory pressure (in some cases) at the cost of slight unpredictability in
overall query performance.
</description>
</property>
<property>
<name>hive.io.sarg.cache.max.weight.mb</name>
<value>10</value>
<description>The max weight allowed for the SearchArgument Cache. By default, the cache allows a max-weight of 10MB, after which entries will be evicted.</description>
</property>
<property>
<name>hive.lazysimple.extended_boolean_literal</name>
<value>false</value>
<description>
LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',
'1', and '0' as extended, legal boolean literals, in addition to 'TRUE' and 'FALSE'.
The default is false, which means only 'TRUE' and 'FALSE' are treated as legal
boolean literals.
</description>
</property>
<property>
<name>hive.optimize.skewjoin</name>
<value>false</value>
<description>
Whether to enable skew join optimization.
The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of
processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce
job, process those skewed keys. The same key need not be skewed for all the tables, and so,
the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a
map-join.
</description>
</property>
<property>
<name>hive.optimize.dynamic.partition.hashjoin</name>
<value>false</value>
<description>
Whether to enable dynamically partitioned hash join optimization.
This setting is also dependent on enabling hive.auto.convert.join
</description>
</property>
<property>
<name>hive.auto.convert.join</name>
<value>true</value>
<description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
</property>
<property>
<name>hive.auto.convert.join.noconditionaltask</name>
<value>true</value>
<description>
Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size.
If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
specified size, the join is directly converted to a mapjoin (there is no conditional task).
</description>
</property>
<property>
<name>hive.auto.convert.join.noconditionaltask.size</name>
<value>10000000</value>
<description>
If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect.
However, if it is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than this size,
the join is directly converted to a mapjoin (there is no conditional task). The default is 10MB.
</description>
</property>
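<!-- Illustrative example: the hive.auto.convert.join* settings above work together. A tuning sketch
     (the values are examples, not recommendations):
       SET hive.auto.convert.join=true;
       SET hive.auto.convert.join.noconditionaltask=true;
       SET hive.auto.convert.join.noconditionaltask.size=268435456;
     With these values, an n-way join whose n-1 smaller inputs sum to less than 256 MB is compiled directly
     into a mapjoin with no conditional task. -->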
<property>
<name>hive.auto.convert.join.use.nonstaged</name>
<value>false</value>
<description>
For conditional joins, if the input stream from a small alias can be directly applied to the join operator without
filtering or projection, the alias need not be pre-staged in the distributed cache via a mapred local task.
Currently, this is not working with vectorization or tez execution engine.
</description>
</property>
<property>
<name>hive.skewjoin.key</name>
<value>100000</value>
<description>
Determines if we have a skew key in a join. If we see more than the specified number of rows with the same key in the join operator,
we treat the key as a skew join key.
</description>
</property>
<property>
<name>hive.skewjoin.mapjoin.map.tasks</name>
<value>10000</value>
<description>
Determines the number of map tasks used in the follow-up map join job for a skew join.
It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control.
</description>
</property>
<property>
<name>hive.skewjoin.mapjoin.min.split</name>
<value>33554432</value>
<description>
Determines the maximum number of map tasks used in the follow-up map join job for a skew join by specifying
the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control.
</description>
</property>
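<!-- Illustrative example: runtime skew join handling uses hive.optimize.skewjoin together with the settings
     above. A minimal sketch:
       SET hive.optimize.skewjoin=true;
       SET hive.skewjoin.key=100000;
     Keys that appear in more than 100000 rows in the join operator are written to a temporary HDFS directory
     and processed in a follow-up map join job, sized via hive.skewjoin.mapjoin.map.tasks and
     hive.skewjoin.mapjoin.min.split. -->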
<property>
<name>hive.heartbeat.interval</name>
<value>1000</value>
<description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
</property>
<property>
<name>hive.limit.row.max.size</name>
<value>100000</value>
<description>When trying a smaller subset of data for simple LIMIT, the minimum size each row is guaranteed to have.</description>
</property>
<property>
<name>hive.limit.optimize.limit.file</name>
<value>10</value>
<description>When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample.</description>
</property>
<property>
<name>hive.limit.optimize.enable</name>
<value>false</value>
<description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
</property>
<property>
<name>hive.limit.optimize.fetch.max</name>
<value>50000</value>
<description>
Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query.
Insert queries are not restricted by this limit.
</description>
</property>
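<!-- Illustrative example (hypothetical table web_logs): the simple-LIMIT sampling optimization is driven by
     the settings above, e.g.
       SET hive.limit.optimize.enable=true;
       SET hive.limit.optimize.limit.file=10;
       SET hive.limit.optimize.fetch.max=50000;
       SELECT * FROM web_logs LIMIT 100;
     Hive first tries the query against a small sample drawn from at most 10 input files; if the sample does
     not produce enough rows, the query may be re-run over the full input. -->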
<property>
<name>hive.limit.pushdown.memory.usage</name>
<value>0.1</value>
<description>
Expects value between 0.0f and 1.0f.
The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization.
</description>
</property>
<property>
<name>hive.auto.convert.join.hashtable.max.entries</name>
<value>21000000</value>
<description>
If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect.
However, if it is on, and the predicted number of entries in hashtable for a given join
input is larger than this number, the join will not be converted to a mapjoin.
The value "-1" means no limit.
</description>
</property>
<property>
<name>hive.auto.convert.join.shuffle.max.size</name>
<value>10000000000</value>
<description>
If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect.
However, if it is on, and the predicted size of the larger input for a given join is greater
than this number, the join will not be converted to a dynamically partitioned hash join.
The value "-1" means no limit.
</description>
</property>
<property>
<name>hive.hashtable.key.count.adjustment</name>
<value>2.0</value>
<description>Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate of the number of keys is divided by this value. If the value is 0, statistics are not used and hive.hashtable.initialCapacity is used instead.</description>
</property>
<property>
<name>hive.hashtable.initialCapacity</name>
<value>100000</value>
<description>Initial capacity of mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0</description>
</property>
<property>
<name>hive.hashtable.loadfactor</name>
<value>0.75</value>
<description/>
</property>
<property>
<name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
<value>0.55</value>
<description>
This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table
when this map join is followed by a group by. If the local task's memory usage is more than this number,
the local task will abort by itself. It means the data of the small table is too large to be held in memory.
</description>
</property>
<property>
<name>hive.mapjoin.localtask.max.memory.usage</name>
<value>0.9</value>
<description>
This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table.
If the local task's memory usage is more than this number, the local task will abort by itself.
It means the data of the small table is too large to be held in memory.
</description>
</property>
<property>
<name>hive.mapjoin.check.memory.rows</name>
<value>100000</value>
<description>The number of rows processed after which the memory usage is checked</description>
</property>
<property>
<name>hive.debug.localtask</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.input.format</name>
<value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
<description>The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.</description>
</property>
<property>
<name>hive.tez.input.format</name>
<value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
<description>The default input format for tez. Tez groups splits in the AM.</description>
</property>
<property>
<name>hive.tez.container.size</name>
<value>-1</value>
<description>By default Tez will spawn containers of the size of a mapper. This can be used to override that.</description>
</property>
<property>
<name>hive.tez.cpu.vcores</name>
<value>-1</value>
<description>
By default Tez will ask for however many cpus map-reduce is configured to use per container.
This can be used to override that.
</description>
</property>
<property>
<name>hive.tez.java.opts</name>
<value/>
<description>By default Tez will use the Java options from map tasks. This can be used to override them.</description>
</property>
<property>
<name>hive.tez.log.level</name>
<value>INFO</value>
<description>
The log level to use for tasks executing as part of the DAG.
Used only if hive.tez.java.opts is used to configure Java options.
</description>
</property>
<property>
<name>hive.tez.hs2.user.access</name>
<value>true</value>
<description>Whether to grant access to the hs2/hive user for queries</description>
</property>
<property>
<name>hive.query.name</name>
<value/>
<description>
This name is used by Tez to set the DAG name. The name in turn will appear on
the Tez UI representing the work that was done. It is also used by Spark to set the query name, and will show up in the
Spark UI.
</description>
</property>
<property>
<name>hive.optimize.bucketingsorting</name>
<value>true</value>
<description>
Don't create a reducer for enforcing
bucketing/sorting for queries of the form:
insert overwrite table T2 select * from T1;
where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets.
</description>
</property>
<property>
<name>hive.mapred.partitioner</name>
<value>org.apache.hadoop.hive.ql.io.DefaultHivePartitioner</value>
<description/>
</property>
<property>
<name>hive.enforce.sortmergebucketmapjoin</name>
<value>false</value>
<description>If the user asked for a sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?</description>
</property>
<property>
<name>hive.enforce.bucketmapjoin</name>
<value>false</value>
<description>
If the user asked for a bucketed map-side join, and it cannot be performed,
should the query fail or not? For example, if the buckets in the tables being joined are
not a multiple of each other, bucketed map-side join cannot be performed, and the
query will fail if hive.enforce.bucketmapjoin is set to true.
</description>
</property>
<property>
<name>hive.constraint.notnull.enforce</name>
<value>true</value>
<description>Should "IS NOT NULL " constraint be enforced?</description>
</property>
<property>
<name>hive.auto.convert.sortmerge.join</name>
<value>true</value>
<description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass the criteria for sort-merge join.</description>
</property>
<property>
<name>hive.auto.convert.sortmerge.join.reduce.side</name>
<value>true</value>
<description>Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to reduce side.</description>
</property>
<property>
<name>hive.auto.convert.sortmerge.join.bigtable.selection.policy</name>
<value>org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ</value>
<description>
The policy to choose the big table for automatic conversion to sort-merge join.
By default, the table with the largest partitions is selected as the big table. All policies are:
. based on position of the table - the leftmost table is selected
org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.
. based on total size (all the partitions selected in the query) of the table
org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.
. based on average size (all the partitions selected in the query) of the table
org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.
New policies can be added in future.
</description>
</property>
<property>
<name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
<value>false</value>
<description>
If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the
big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
if the complete small table can fit in memory, and a map-join can be performed.
</description>
</property>
<property>
<name>hive.exec.script.trust</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.exec.rowoffset</name>
<value>false</value>
<description>Whether to provide the row offset virtual column</description>
</property>
<property>
<name>hive.optimize.index.filter</name>
<value>false</value>
<description>Whether to enable automatic use of indexes</description>
</property>
<property>
<name>hive.optimize.ppd</name>
<value>true</value>
<description>Whether to enable predicate pushdown</description>
</property>
<property>
<name>hive.optimize.ppd.windowing</name>
<value>true</value>
<description>Whether to enable predicate pushdown through windowing</description>
</property>
<property>
<name>hive.ppd.recognizetransivity</name>
<value>true</value>
<description>Whether to transitively replicate predicate filters over equijoin conditions.</description>
</property>
<property>
<name>hive.ppd.remove.duplicatefilters</name>
<value>true</value>
<description>
During query optimization, filters may be pushed down in the operator tree.
If this config is true only pushed down filters remain in the operator tree,
and the original filter is removed. If this config is false, the original filter
is also left in the operator tree at the original place.
</description>
</property>
<property>
<name>hive.optimize.point.lookup</name>
<value>true</value>
<description>Whether to transform OR clauses in Filter operators into IN clauses</description>
</property>
<property>
<name>hive.optimize.point.lookup.min</name>
<value>31</value>
<description>Minimum number of OR clauses needed to transform into IN clauses</description>
</property>
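<!-- Illustrative example: with hive.optimize.point.lookup=true and hive.optimize.point.lookup.min=31,
     a filter such as (hypothetical column c)
       WHERE c=1 OR c=2 OR ... OR c=40
     is rewritten to c IN (1, 2, ..., 40) once the number of OR branches reaches the configured minimum;
     shorter OR chains are left unchanged. -->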
<property>
<name>hive.optimize.countdistinct</name>
<value>true</value>
<description>Whether to transform count distinct into two stages</description>
</property>
<property>
<name>hive.optimize.partition.columns.separate</name>
<value>true</value>
<description>Extract partition columns from IN clauses</description>
</property>
<property>
<name>hive.optimize.constant.propagation</name>
<value>true</value>
<description>Whether to enable constant propagation optimizer</description>
</property>
<property>
<name>hive.optimize.remove.identity.project</name>
<value>true</value>
<description>Removes identity project from operator tree</description>
</property>
<property>
<name>hive.optimize.metadataonly</name>
<value>false</value>
<description>
Whether to eliminate scans of the tables from which no columns are selected. Note
that, when selecting from empty tables with data files, this can produce incorrect
results, so it's disabled by default. It works correctly for normal tables.
</description>
</property>
<property>
<name>hive.optimize.null.scan</name>
<value>true</value>
<description>Don't scan relations which are guaranteed not to generate any rows</description>
</property>
<property>
<name>hive.optimize.ppd.storage</name>
<value>true</value>
<description>Whether to push predicates down to storage handlers</description>
</property>
<property>
<name>hive.optimize.groupby</name>
<value>true</value>
<description>Whether to enable the bucketed group by from bucketed partitions/tables.</description>
</property>
<property>
<name>hive.optimize.bucketmapjoin</name>
<value>false</value>
<description>Whether to try bucket mapjoin</description>
</property>
<property>
<name>hive.optimize.bucketmapjoin.sortedmerge</name>
<value>false</value>
<description>Whether to try sorted bucket merge map join</description>
</property>
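<!-- Illustrative example: bucket map joins are opt-in via the two flags above. A minimal sketch, assuming
     hypothetical tables orders and users that are both bucketed (and sorted) on user_id into compatible
     bucket counts:
       SET hive.optimize.bucketmapjoin=true;
       SET hive.optimize.bucketmapjoin.sortedmerge=true;
       SELECT o.order_id, u.name FROM orders o JOIN users u ON o.user_id = u.user_id;
     If the bucketing requirements are not met, the join falls back to a regular join unless
     hive.enforce.bucketmapjoin is set to true, in which case the query fails. -->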
<property>
<name>hive.optimize.reducededuplication</name>
<value>true</value>
<description>
Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
This should always be set to true. Since it is a new feature, it has been made configurable.
</description>
</property>
<property>
<name>hive.optimize.reducededuplication.min.reducer</name>
<value>4</value>
<description>
Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-reducer MR job.
The optimization will be automatically disabled if number of reducers would be less than specified value.
</description>
</property>
<property>
<name>hive.optimize.joinreducededuplication</name>
<value>true</value>
<description>
Remove extra shuffle/sorting operations after join algorithm selection has been executed.
Currently it only works with Apache Tez. This should always be set to true.
Since it is a new feature, it has been made configurable.
</description>
</property>
<property>
<name>hive.optimize.sort.dynamic.partition</name>
<value>false</value>
<description>
When enabled, the dynamic partitioning column will be globally sorted.
This way we can keep only one record writer open for each partition value
in the reducer thereby reducing the memory pressure on reducers.
</description>
</property>
<property>
<name>hive.optimize.sampling.orderby</name>
<value>false</value>
<description>Uses sampling on order-by clause for parallel execution.</description>
</property>
<property>
<name>hive.optimize.sampling.orderby.number</name>
<value>1000</value>
<description>Total number of samples to be obtained.</description>
</property>
<property>
<name>hive.optimize.sampling.orderby.percent</name>
<value>0.1</value>
<description>
Expects value between 0.0f and 1.0f.
Probability with which a row will be chosen.
</description>
</property>
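<!-- Illustrative example (hypothetical table events): parallel ORDER BY via sampling uses the three settings
     above, e.g.
       SET hive.optimize.sampling.orderby=true;
       SET hive.optimize.sampling.orderby.number=1000;
       SET hive.optimize.sampling.orderby.percent=0.1;
       SELECT * FROM events ORDER BY event_time;
     Instead of funneling everything through a single reducer, Hive samples the sort key to build a range
     partitioner so the global sort can run on multiple reducers. -->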
<property>
<name>hive.remove.orderby.in.subquery</name>
<value>true</value>
<description>If set to true, order/sort by without limit in sub queries will be removed.</description>
</property>
<property>
<name>hive.optimize.distinct.rewrite</name>
<value>true</value>
<description>When applicable, this optimization rewrites distinct aggregates from a single stage to multi-stage aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or not should be a cost-based decision. Until Hive formalizes a cost model for this, it is config driven.</description>
</property>
<property>
<name>hive.optimize.union.remove</name>
<value>false</value>
<description>
Whether to remove the union and push the operators between union and the filesink above union.
This avoids an extra scan of the output by union. This is independently useful for union
queries, and especially useful when hive.optimize.skewjoin.compiletime is set to true, since an
extra union is inserted.
The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.
If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the assumption was that the
number of reducers is small, so the number of files is small anyway. However, with this optimization,
we may increase the number of files by a big margin. So, we merge aggressively.
</description>
</property>
<property>
<name>hive.optimize.correlation</name>
<value>false</value>
<description>exploit intra-query correlations.</description>
</property>
<property>
<name>hive.optimize.limittranspose</name>
<value>false</value>
<description>
Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer
input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed
to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too.
</description>
</property>
<property>
<name>hive.optimize.limittranspose.reductionpercentage</name>
<value>1.0</value>
<description>
When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the
size of the outer input of the join or input of the union that we should get in order to apply the rule.
</description>
</property>
<property>
<name>hive.optimize.limittranspose.reductiontuples</name>
<value>0</value>
<description>
When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the
number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule.
</description>
</property>
<property>
<name>hive.optimize.filter.stats.reduction</name>
<value>false</value>
<description>
Whether to simplify comparison
expressions in filter operators using column stats
</description>
</property>
<property>
<name>hive.optimize.skewjoin.compiletime</name>
<value>false</value>
<description>
Whether to create a separate plan for skewed keys for the tables in the join.
This is based on the skewed keys stored in the metadata. At compile time, the plan is broken
into different joins: one for the skewed keys, and the other for the remaining keys. And then,
a union is performed for the 2 joins generated above. So unless the same skewed key is present
in both the joined tables, the join for the skewed key will be performed as a map-side join.
The main difference between this parameter and hive.optimize.skewjoin is that this parameter
uses the skew information stored in the metastore to optimize the plan at compile time itself.
If there is no skew information in the metadata, this parameter will not have any effect.
Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.
Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing
so for backward compatibility.
If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime
would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op.
</description>
</property>
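<!-- Illustrative example (hypothetical table clicks): compile-time skew handling relies on skew metadata
     declared on the table, e.g.
       CREATE TABLE clicks (user_id STRING, url STRING) SKEWED BY (user_id) ON ('bot_user');
       SET hive.optimize.skewjoin.compiletime=true;
       SET hive.optimize.skewjoin=true;
     Joins on clicks.user_id are then split at compile time into a plan for the skewed key 'bot_user' and a
     plan for the remaining keys, and the results are unioned. -->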
<property>
<name>hive.optimize.shared.work</name>
<value>true</value>
<description>
Whether to enable the shared work optimizer. The optimizer finds scan operators over the same table
and follow-up operators in the query plan and merges them if they meet some preconditions. Tez only.
</description>
</property>
<property>
<name>hive.optimize.shared.work.extended</name>
<value>true</value>
<description>
Whether to enable shared work extended optimizer. The optimizer tries to merge equal operators
after a work boundary after shared work optimizer has been executed. Requires hive.optimize.shared.work
to be set to true. Tez only.
</description>
</property>
<property>
<name>hive.combine.equivalent.work.optimization</name>
<value>true</value>
<description>
Whether to combine equivalent work objects during physical optimization.
This optimization looks for equivalent work objects and combines them if they meet certain preconditions. Spark only.
</description>
</property>
<property>
<name>hive.optimize.remove.sq_count_check</name>
<value>false</value>
<description>Whether to remove an extra join with sq_count_check for scalar subqueries with constant group by keys.</description>
</property>
<property>
<name>hive.optimize.update.table.properties.from.serde</name>
<value>false</value>
<description>
Whether to update table-properties by initializing tables' SerDe instances during logical-optimization.
By doing so, certain SerDe classes (like AvroSerDe) can pre-calculate table-specific information, and
store it in table-properties, to be used later in the SerDe, while running the job.
</description>
</property>
<property>
<name>hive.optimize.update.table.properties.from.serde.list</name>
<value>org.apache.hadoop.hive.serde2.avro.AvroSerDe</