@geoHeil
Created February 13, 2017 17:32
[root@mn01 data]# ls
command-10.json command-20.json command-6.json errors-15.txt errors-25.txt OOZIE_CLIENT_config.json output-19.txt output-5.txt structured-out-11.json structured-out-24.json version
command-11.json command-21.json command-7.json errors-16.txt errors-2.txt output-10.txt output-20.txt output-6.txt structured-out-12.json structured-out-2.json
command-12.json command-22.json command-8.json errors-17.txt errors-3.txt output-11.txt output-21.txt output-7.txt structured-out-13.json structured-out-3.json
command-13.json command-23.json command-9.json errors-18.txt errors-4.txt output-12.txt output-22.txt output-8.txt structured-out-14.json structured-out-4.json
command-14.json command-24.json config.json errors-19.txt errors-5.txt output-13.txt output-23.txt output-9.txt structured-out-15.json structured-out-5.json
command-15.json command-25.json errors-10.txt errors-20.txt errors-6.txt output-14.txt output-24.txt PIG_config.json structured-out-17.json structured-out-6.json
command-16.json command-2.json errors-11.txt errors-21.txt errors-7.txt output-15.txt output-25.txt status_command.json structured-out-20.json structured-out-7.json
command-17.json command-3.json errors-12.txt errors-22.txt errors-8.txt output-16.txt output-2.txt status_command_stderr.txt structured-out-21.json structured-out-8.json
command-18.json command-4.json errors-13.txt errors-23.txt errors-9.txt output-17.txt output-3.txt status_command_stdout.txt structured-out-22.json structured-out-9.json
command-19.json command-5.json errors-14.txt errors-24.txt hostcheck.result output-18.txt output-4.txt structured-out-10.json structured-out-23.json structured-out-status.json
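
The listing above is the Ambari agent's command/output cache: each command-N.json pairs with output-N.txt, errors-N.txt and structured-out-N.json for the same task. Not part of the original gist, but as a minimal sketch of how these files can be inspected in bulk (assuming Python 3 and the directory shown in the prompt, e.g. /var/lib/ambari-agent/data):

#!/usr/bin/env python3
# Illustrative sketch only: summarize the command-N.json files listed above.
# Assumes each file is a JSON document shaped like command-25.json shown below.
import glob
import json
import os

data_dir = "/var/lib/ambari-agent/data"  # assumed path; the prompt above is inside this "data" dir

for path in sorted(glob.glob(os.path.join(data_dir, "command-*.json"))):
    with open(path) as fh:
        cmd = json.load(fh)
    # serviceName / role / roleCommand / taskId identify what each command was asked to do,
    # e.g. RANGER / RANGER_ADMIN / INSTALL / 25 for command-25.json below.
    print(os.path.basename(path),
          cmd.get("serviceName"),
          cmd.get("role"),
          cmd.get("roleCommand"),
          "taskId=%s" % cmd.get("taskId"))
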
[root@mn01 data]# cat command-25.json
{
"localComponents": [
"METRICS_MONITOR",
"KAFKA_BROKER",
"OOZIE_CLIENT",
"HBASE_CLIENT",
"SPARK_CLIENT",
"HIVE_SERVER",
"ZOOKEEPER_CLIENT",
"KERBEROS_CLIENT",
"HCAT",
"HBASE_MASTER",
"HIVE_METASTORE",
"HDFS_CLIENT",
"HBASE_REGIONSERVER",
"FALCON_CLIENT",
"PIG",
"NAMENODE",
"DATANODE",
"MAPREDUCE2_CLIENT",
"NODEMANAGER",
"HIVE_CLIENT",
"SQOOP",
"TEZ_CLIENT",
"YARN_CLIENT",
"RANGER_ADMIN",
"RESOURCEMANAGER",
"WEBHCAT_SERVER",
"OOZIE_SERVER",
"APP_TIMELINE_SERVER",
"RANGER_USERSYNC",
"FALCON_SERVER",
"SECONDARY_NAMENODE",
"HISTORYSERVER",
"ZOOKEEPER_SERVER",
"METRICS_COLLECTOR",
"SPARK_JOBHISTORYSERVER"
],
"configuration_attributes": {
"spark-defaults": {},
"sqoop-site": {},
"ranger-hdfs-audit": {},
"ranger-hdfs-policymgr-ssl": {},
"pig-env": {},
"ranger-kafka-plugin-properties": {},
"ams-grafana-env": {},
"usersync-log4j": {},
"llap-cli-log4j2": {},
"spark-hive-site-override": {},
"falcon-runtime.properties": {},
"ranger-hive-security": {},
"falcon-log4j": {},
"kerberos-env": {},
"ams-hbase-security-site": {},
"oozie-env": {},
"spark-env": {},
"hdfs-site": {
"final": {
"dfs.datanode.data.dir": "true",
"dfs.namenode.http-address": "true",
"dfs.datanode.failed.volumes.tolerated": "true",
"dfs.support.append": "true",
"dfs.namenode.name.dir": "true",
"dfs.webhdfs.enabled": "true"
}
},
"ams-env": {},
"zookeeper-log4j": {},
"hdfs-log4j": {},
"ranger-yarn-audit": {},
"ranger-hdfs-plugin-properties": {},
"pig-properties": {},
"oozie-log4j": {},
"hadoop-env": {},
"tez-interactive-site": {},
"livy-spark-blacklist": {},
"admin-log4j": {},
"hiveserver2-site": {},
"ranger-hbase-security": {},
"ssl-client": {},
"falcon-startup.properties": {},
"hive-log4j2": {},
"tagsync-application-properties": {},
"ranger-hive-policymgr-ssl": {},
"spark-javaopts-properties": {},
"webhcat-env": {},
"ams-ssl-server": {},
"tez-site": {},
"spark-thrift-sparkconf": {
"final": {
"spark.eventLog.dir": "true",
"spark.eventLog.enabled": "true",
"spark.history.fs.logDirectory": "true"
}
},
"ranger-tagsync-site": {},
"hive-exec-log4j": {},
"hive-interactive-site": {},
"ranger-hive-plugin-properties": {},
"krb5-conf": {},
"core-site": {
"final": {
"fs.defaultFS": "true"
}
},
"hiveserver2-interactive-site": {},
"capacity-scheduler": {},
"ranger-kafka-security": {},
"kafka-env": {},
"tagsync-log4j": {},
"usersync-properties": {},
"ams-log4j": {},
"hive-exec-log4j2": {},
"zookeeper-env": {},
"ams-hbase-log4j": {},
"cluster-env": {},
"livy-log4j-properties": {},
"mapred-site": {},
"webhcat-log4j": {},
"ranger-yarn-plugin-properties": {},
"ranger-admin-site": {},
"ams-hbase-site": {
"final": {
"hbase.zookeeper.quorum": "true"
}
},
"ranger-ugsync-site": {},
"hivemetastore-site": {},
"spark-log4j-properties": {},
"hbase-site": {},
"ams-hbase-policy": {},
"hadoop-policy": {},
"hive-site": {
"hidden": {
"javax.jdo.option.ConnectionPassword": "HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD"
}
},
"hive-interactive-env": {},
"falcon-atlas-application.properties": {},
"hive-env": {},
"ranger-yarn-policymgr-ssl": {},
"yarn-site": {},
"ranger-hbase-plugin-properties": {},
"falcon-client.properties": {},
"webhcat-site": {},
"kafka-log4j": {},
"llap-daemon-log4j": {},
"hive-log4j": {},
"ranger-hdfs-security": {},
"sqoop-atlas-application.properties": {},
"mapred-env": {},
"ranger-hive-audit": {},
"sqoop-env": {},
"livy-conf": {},
"ranger-hbase-audit": {},
"livy-env": {},
"ranger-env": {},
"ams-site": {},
"ams-ssl-client": {},
"spark-thrift-fairscheduler": {},
"hbase-policy": {},
"hive-atlas-application.properties": {},
"admin-properties": {},
"hcat-env": {},
"falcon-env": {},
"zoo.cfg": {},
"kafka-broker": {},
"ams-grafana-ini": {},
"tez-env": {},
"ranger-kafka-policymgr-ssl": {},
"spark-metrics-properties": {},
"ams-hbase-env": {},
"ranger-kafka-audit": {},
"yarn-env": {},
"beeline-log4j2": {},
"ranger-yarn-security": {},
"hbase-log4j": {},
"oozie-site": {},
"ssl-server": {},
"ranger-site": {},
"hbase-env": {},
"yarn-log4j": {},
"ranger-hbase-policymgr-ssl": {},
"pig-log4j": {}
},
"public_hostname": "mn01.vagrant",
"commandId": "4-0",
"hostname": "mn01.vagrant",
"kerberosCommandParams": [],
"serviceName": "RANGER",
"role": "RANGER_ADMIN",
"forceRefreshConfigTagsBeforeExecution": false,
"requestId": 4,
"agentConfigParams": {
"agent": {
"parallel_execution": 0
}
},
"clusterName": "examplecluster",
"commandType": "EXECUTION_COMMAND",
"taskId": 25,
"roleParams": {},
"configurationTags": {
"spark-defaults": {
"tag": "TOPOLOGY_RESOLVED"
},
"sqoop-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hdfs-audit": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hdfs-policymgr-ssl": {
"tag": "TOPOLOGY_RESOLVED"
},
"pig-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-kafka-plugin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-grafana-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hive-policymgr-ssl": {
"tag": "TOPOLOGY_RESOLVED"
},
"llap-cli-log4j2": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-hive-site-override": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-runtime.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hive-security": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"kerberos-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-hbase-security-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"oozie-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"hdfs-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"zookeeper-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hdfs-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hbase-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-yarn-audit": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hdfs-plugin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"pig-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"oozie-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hadoop-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"tez-interactive-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"livy-spark-blacklist": {
"tag": "TOPOLOGY_RESOLVED"
},
"admin-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hiveserver2-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hbase-security": {
"tag": "TOPOLOGY_RESOLVED"
},
"ssl-client": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-startup.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-log4j2": {
"tag": "TOPOLOGY_RESOLVED"
},
"tagsync-application-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"beeline-log4j2": {
"tag": "TOPOLOGY_RESOLVED"
},
"usersync-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-javaopts-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"webhcat-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-ssl-server": {
"tag": "TOPOLOGY_RESOLVED"
},
"tez-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-thrift-sparkconf": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-tagsync-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-exec-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-kafka-policymgr-ssl": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hive-plugin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"krb5-conf": {
"tag": "TOPOLOGY_RESOLVED"
},
"core-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"hiveserver2-interactive-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"capacity-scheduler": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hive-audit": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-kafka-security": {
"tag": "TOPOLOGY_RESOLVED"
},
"kafka-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"tagsync-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"usersync-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-exec-log4j2": {
"tag": "TOPOLOGY_RESOLVED"
},
"zookeeper-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"cluster-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"livy-log4j-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"mapred-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"webhcat-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-yarn-plugin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-admin-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-hbase-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-ugsync-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"hivemetastore-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-log4j-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-hbase-policy": {
"tag": "TOPOLOGY_RESOLVED"
},
"hadoop-policy": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hbase-plugin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"mapred-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-atlas-application.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-hbase-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"yarn-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-client.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"webhcat-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"kafka-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hdfs-security": {
"tag": "TOPOLOGY_RESOLVED"
},
"sqoop-atlas-application.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-interactive-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"llap-daemon-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"sqoop-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"livy-conf": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-yarn-policymgr-ssl": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hbase-audit": {
"tag": "TOPOLOGY_RESOLVED"
},
"livy-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-ssl-client": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-thrift-fairscheduler": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-hbase-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-atlas-application.properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"admin-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"hcat-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"falcon-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"zoo.cfg": {
"tag": "TOPOLOGY_RESOLVED"
},
"kafka-broker": {
"tag": "TOPOLOGY_RESOLVED"
},
"ams-grafana-ini": {
"tag": "TOPOLOGY_RESOLVED"
},
"tez-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-interactive-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"spark-metrics-properties": {
"tag": "TOPOLOGY_RESOLVED"
},
"hbase-policy": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-kafka-audit": {
"tag": "TOPOLOGY_RESOLVED"
},
"yarn-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"hive-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-yarn-security": {
"tag": "TOPOLOGY_RESOLVED"
},
"hbase-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"oozie-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"ssl-server": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-site": {
"tag": "TOPOLOGY_RESOLVED"
},
"hbase-env": {
"tag": "TOPOLOGY_RESOLVED"
},
"yarn-log4j": {
"tag": "TOPOLOGY_RESOLVED"
},
"ranger-hbase-policymgr-ssl": {
"tag": "TOPOLOGY_RESOLVED"
},
"pig-log4j": {
"tag": "TOPOLOGY_RESOLVED"
}
},
"roleCommand": "INSTALL",
"hostLevelParams": {
"previous_custom_mysql_jdbc_name": "mysql-connector-java.jar",
"agent_stack_retry_on_unavailability": "false",
"stack_name": "HDP",
"custom_mysql_jdbc_name": "mysql-connector-java.jar",
"host_sys_prepped": "false",
"ambari_db_rca_username": "ambari",
"mysql_jdbc_url": "http://mn01.vagrant:8080/resources//mysql-connector-java.jar",
"agent_stack_retry_count": "5",
"stack_version": "2.5",
"ambari_db_rca_driver": "com.mysql.jdbc.Driver",
"java_home": "/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64",
"jdk_location": "http://mn01.vagrant:8080/resources/",
"not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
"ambari_db_rca_url": "jdbc:mysql://mn01.vagrant:3306/ambari",
"java_version": "8",
"repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.3.0\",\"osType\":\"redhat7\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.3.0\",\"baseSaved\":false},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7\",\"osType\":\"redhat7\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7\",\"baseSaved\":false}]",
"package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-infra-solr-client\",\"condition\":\"should_install_infra_solr_client\",\"skipUpgrade\":false}]",
"oracle_jdbc_url": "http://mn01.vagrant:8080/resources//ojdbc6.jar",
"group_list": "[\"livy\",\"spark\",\"ranger\",\"hadoop\",\"users\"]",
"agentCacheDir": "/var/lib/ambari-agent/cache",
"ambari_db_rca_password": "bigdata",
"db_name": "ambari",
"db_driver_filename": "mysql-connector-java.jar",
"user_list": "[\"hive\",\"zookeeper\",\"oozie\",\"ams\",\"falcon\",\"ranger\",\"tez\",\"livy\",\"spark\",\"ambari-qa\",\"kafka\",\"hdfs\",\"sqoop\",\"yarn\",\"mapred\",\"hbase\",\"hcat\"]",
"clientsToUpdateConfigs": "[\"*\"]"
},
"commandParams": {
"service_package_folder": "common-services/RANGER/0.4.0/package",
"script": "scripts/ranger_admin.py",
"hooks_folder": "HDP/2.0.6/hooks",
"phase": "INITIAL_INSTALL",
"max_duration_for_retries": "600",
"command_retry_enabled": "true",
"command_timeout": "1800",
"script_type": "PYTHON"
},
"stageId": 0,
"clusterHostInfo": {
"snamenode_host": [
"mn01.vagrant"
],
"nm_hosts": [
"mn01.vagrant"
],
"falcon_server_hosts": [
"mn01.vagrant"
],
"hive_metastore_host": [
"mn01.vagrant"
],
"ranger_usersync_hosts": [
"mn01.vagrant"
],
"kafka_broker_hosts": [
"mn01.vagrant"
],
"slave_hosts": [
"mn01.vagrant"
],
"spark_jobhistoryserver_hosts": [
"mn01.vagrant"
],
"metrics_collector_hosts": [
"mn01.vagrant"
],
"hive_server_host": [
"mn01.vagrant"
],
"hbase_rs_hosts": [
"mn01.vagrant"
],
"webhcat_server_host": [
"mn01.vagrant"
],
"ranger_admin_hosts": [
"mn01.vagrant"
],
"ambari_server_host": [
"mn01.vagrant"
],
"zookeeper_hosts": [
"mn01.vagrant"
],
"app_timeline_server_hosts": [
"mn01.vagrant"
],
"all_ping_ports": [
"8670"
],
"rm_host": [
"mn01.vagrant"
],
"all_hosts": [
"mn01.vagrant"
],
"ambari_server_use_ssl": [
"false"
],
"metrics_monitor_hosts": [
"mn01.vagrant"
],
"oozie_server": [
"mn01.vagrant"
],
"all_racks": [
"/default-rack"
],
"all_ipv4_ips": [
"127.0.0.1"
],
"hs_host": [
"mn01.vagrant"
],
"ambari_server_port": [
"8080"
],
"namenode_host": [
"mn01.vagrant"
],
"hbase_master_hosts": [
"mn01.vagrant"
]
},
"availableServices": {
"SQOOP": "1.4.6.2.5",
"AMBARI_METRICS": "0.1.0",
"KERBEROS": "1.10.3-10",
"RANGER": "0.6.0.2.5",
"ZEPPELIN": "0.6.0.2.5",
"ATLAS": "0.7.0.2.5",
"KNOX": "0.9.0.2.5",
"SMARTSENSE": "1.3.1.0-136",
"RANGER_KMS": "0.6.0.2.5",
"FLUME": "1.5.2.2.5",
"GANGLIA": "3.5.0",
"YARN": "2.7.1.2.5",
"PIG": "0.16.0.2.5",
"MAHOUT": "0.9.0.2.5",
"TEZ": "0.7.0.2.5",
"MAPREDUCE2": "2.7.1.2.4",
"OOZIE": "4.2.0.2.5",
"SPARK": "1.6.x.2.5",
"ACCUMULO": "1.7.0.2.5",
"LOGSEARCH": "0.5.0",
"SPARK2": "2.0.x.2.5",
"SLIDER": "0.80.0.2.5",
"STORM": "1.0.1.2.5",
"FALCON": "0.10.0.2.5",
"HDFS": "2.7.1.2.5",
"ZOOKEEPER": "3.4.6.2.5",
"HIVE": "1.2.1.2.5",
"KAFKA": "0.10.0.2.5",
"AMBARI_INFRA": "0.1.0",
"HBASE": "1.1.2.2.5"
},
"configurations": {
"spark-defaults": {
"spark.yarn.scheduler.heartbeat.interval-ms": "5000",
"spark.eventLog.dir": "hdfs:///spark-history",
"spark.history.kerberos.keytab": "/etc/security/keytabs/spark.headless.keytab",
"spark.history.fs.logDirectory": "hdfs:///spark-history",
"spark.yarn.preserve.staging.files": "false",
"spark.yarn.submit.file.replication": "3",
"spark.history.kerberos.principal": "spark-examplecluster@HADOOP.TEST",
"spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}",
"spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}",
"spark.yarn.queue": "default",
"spark.yarn.containerLauncherMaxThreads": "25",
"spark.yarn.driver.memoryOverhead": "384",
"spark.history.ui.port": "18080",
"spark.yarn.executor.memoryOverhead": "384",
"spark.history.kerberos.enabled": "true",
"spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}",
"spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider",
"spark.eventLog.enabled": "true"
},
"sqoop-site": {},
"ranger-hdfs-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
"xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
"xasecure.audit.destination.solr.urls": "",
"xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"xasecure.audit.destination.hdfs": "true",
"xasecure.audit.is.enabled": "true",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"xasecure.audit.destination.solr": "false",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/nn.service.keytab",
"xasecure.audit.provider.summary.enabled": "false",
"xasecure.audit.jaas.Client.option.principal": "nn/_HOST@HADOOP.TEST",
"xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"xasecure.audit.jaas.Client.option.serviceName": "solr"
},
"ranger-hdfs-policymgr-ssl": {
"xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
"xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"pig-env": {
"content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
},
"ranger-kafka-plugin-properties": {
"hadoop.rpc.protection": "",
"REPOSITORY_CONFIG_USERNAME": "kafka",
"zookeeper.connect": "localhost:2181",
"policy_user": "ambari-qa",
"common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "kafka",
"ranger-kafka-plugin-enabled": "Yes"
},
"ams-grafana-env": {
"metrics_grafana_username": "admin",
"metrics_grafana_pid_dir": "/var/run/ambari-metrics-grafana",
"metrics_grafana_data_dir": "/var/lib/ambari-metrics-grafana",
"content": "\n# Set environment variables here.\n\n# AMS UI Server Home Dir\nexport AMS_GRAFANA_HOME_DIR={{ams_grafana_home_dir}}\n\n# AMS UI Server Data Dir\nexport AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}\n\n# AMS UI Server Log Dir\nexport AMS_GRAFANA_LOG_DIR={{ams_grafana_log_dir}}\n\n# AMS UI Server PID Dir\nexport AMS_GRAFANA_PID_DIR={{ams_grafana_pid_dir}}",
"metrics_grafana_password": "bigdata",
"metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana"
},
"usersync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
},
"llap-cli-log4j2": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = WARN\nname = LlapCliLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = console\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = llap-cli.log\n\n# list of all appenders\nappenders = console, DRFA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %p %c{2}: %m%n\n\n# daily rolling file appender\nappender.DRFA.type = RollingRandomAccessFile\nappender.DRFA.name = DRFA\nappender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\n# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI session\nappender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}\nappender.DRFA.layout.type = PatternLayout\nappender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n\nappender.DRFA.policies.type = Policies\nappender.DRFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.DRFA.policies.time.interval = 1\nappender.DRFA.policies.time.modulate = true\nappender.DRFA.strategy.type = DefaultRolloverStrategy\nappender.DRFA.strategy.max = 30\n\n# list of all loggers\nloggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf\n\nlogger.ZooKeeper.name = org.apache.zookeeper\nlogger.ZooKeeper.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\nlogger.HadoopConf.name = org.apache.hadoop.conf.Configuration\nlogger.HadoopConf.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root, DRFA\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}\nrootLogger.appenderRef.DRFA.ref = DRFA"
},
"spark-hive-site-override": {
"hive.server2.enable.doAs": "false",
"hive.metastore.client.connect.retry.delay": "5",
"hive.server2.transport.mode": "binary",
"hive.server2.thrift.port": "10015",
"hive.metastore.client.socket.timeout": "1800"
},
"falcon-runtime.properties": {
"*.log.cleanup.frequency.hours.retention": "minutes(1)",
"*.domain": "${falcon.app.type}",
"*.log.cleanup.frequency.months.retention": "months(3)",
"*.log.cleanup.frequency.minutes.retention": "hours(6)",
"*.log.cleanup.frequency.days.retention": "days(7)"
},
"ranger-hive-security": {
"ranger.plugin.hive.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
"ranger.plugin.hive.policy.rest.ssl.config.file": "/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml",
"xasecure.hive.update.xapolicies.on.grant.revoke": "true",
"ranger.plugin.hive.service.name": "{{repo_name}}",
"ranger.plugin.hive.policy.rest.url": "{{policymgr_mgr_url}}",
"ranger.plugin.hive.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
"ranger.plugin.hive.policy.pollIntervalMs": "30000"
},
"falcon-log4j": {
"content": "\n<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one\n or more contributor license agreements. See the NOTICE file\n distributed with this work for additional information\n regarding copyright ownership. The ASF licenses this file\n to you under the Apache License, Version 2.0 (the\n \"License\"); you may not use this file except in compliance\n with the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n -->\n\n<!--\n This is used for falcon packaging only.\n -->\n\n<!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\">\n\n<log4j:configuration xmlns:log4j=\"http://jakarta.apache.org/log4j/\">\n <appender name=\"FILE\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.application.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n\"/>\n </layout>\n </appender>\n\n <appender name=\"AUDIT\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.audit.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %x %m%n\"/>\n </layout>\n </appender>\n\n <appender name=\"METRIC\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.metric.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %m%n\"/>\n </layout>\n </appender>\n\n <appender name=\"FeedSLA\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.feed.sla.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %m%n\"/>\n </layout>\n </appender>\n\n <appender name=\"ALERT\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.alerts.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %m%n\"/>\n </layout>\n </appender>\n\n <appender name=\"SECURITY\" class=\"org.apache.log4j.DailyRollingFileAppender\">\n <param name=\"File\" value=\"${falcon.log.dir}/${falcon.app.type}.security.audit.log\"/>\n <param name=\"Append\" value=\"true\"/>\n <param name=\"Threshold\" value=\"debug\"/>\n <layout class=\"org.apache.log4j.PatternLayout\">\n <param name=\"ConversionPattern\" value=\"%d %x %m%n\"/>\n </layout>\n </appender>\n\n <logger name=\"org.apache.falcon\" additivity=\"false\">\n <level value=\"debug\"/>\n <appender-ref ref=\"FILE\"/>\n </logger>\n\n <logger 
name=\"AUDIT\">\n <level value=\"info\"/>\n <appender-ref ref=\"AUDIT\"/>\n </logger>\n\n <logger name=\"METRIC\">\n <level value=\"info\"/>\n <appender-ref ref=\"METRIC\"/>\n </logger>\n\n <logger name=\"FeedSLA\">\n <level value=\"debug\"/>\n <appender-ref ref=\"FeedSLA\"/>\n </logger>\n\n <logger name=\"org.apache.hadoop.security\" additivity=\"false\">\n <level value=\"info\"/>\n <appender-ref ref=\"SECURITY\"/>\n </logger>\n\n <logger name=\"org.apache.hadoop\" additivity=\"false\">\n <level value=\"info\"/>\n <appender-ref ref=\"FILE\"/>\n </logger>\n\n <logger name=\"org.apache.oozie\" additivity=\"false\">\n <level value=\"info\"/>\n <appender-ref ref=\"FILE\"/>\n </logger>\n\n <logger name=\"org.apache.hadoop.hive\" additivity=\"false\">\n <level value=\"info\"/>\n <appender-ref ref=\"FILE\"/>\n </logger>\n\n <root>\n <priority value=\"info\"/>\n <appender-ref ref=\"FILE\"/>\n </root>\n\n</log4j:configuration>"
},
"kerberos-env": {
"kdc_hosts": "",
"manage_auth_to_local": "true",
"install_packages": "true",
"realm": "HADOOP.TEST",
"encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5",
"ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}",
"kdc_create_attributes": "",
"admin_server_host": "mn01.vagrant",
"group": "ambari-managed-principals",
"password_length": "20",
"ldap_url": "",
"manage_identities": "true",
"kdc_host": "mn01.vagrant",
"password_min_lowercase_letters": "1",
"create_ambari_principal": "true",
"service_check_principal_name": "${cluster_name|toLower()}-${short_date}",
"executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin",
"password_chat_timeout": "5",
"kdc_type": "mit-kdc",
"set_password_expiry": "false",
"password_min_punctuation": "1",
"container_dn": "",
"case_insensitive_username_rules": "false",
"password_min_whitespace": "0",
"password_min_uppercase_letters": "1",
"password_min_digits": "1"
},
"ams-hbase-security-site": {
"hbase.master.kerberos.principal": "amshbase/_HOST@HADOOP.TEST",
"hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true",
"hadoop.security.authentication": "kerberos",
"hbase.security.authorization": "true",
"hbase.myclient.principal": "amshbase/_HOST@HADOOP.TEST",
"hbase.regionserver.keytab.file": "/etc/security/keytabs/ams-hbase.regionserver.keytab",
"ams.zookeeper.principal": "amszk/_HOST@HADOOP.TEST",
"hbase.regionserver.kerberos.principal": "amshbase/_HOST@HADOOP.TEST",
"hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
"hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
"hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
"hbase.master.keytab.file": "/etc/security/keytabs/ams-hbase.master.keytab",
"hbase.security.authentication": "kerberos",
"hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
"hbase.zookeeper.property.jaasLoginRenew": "3600000",
"hbase.myclient.keytab": "/etc/security/keytabs/ams.collector.keytab",
"ams.zookeeper.keytab": "/etc/security/keytabs/ams-zk.service.keytab"
},
"oozie-env": {
"oozie_heapsize": "2048m",
"oozie_log_dir": "/var/log/oozie",
"oozie_admin_port": "11001",
"oozie_user_nproc_limit": "16000",
"oozie_data_dir": "/hadoop/oozie/data",
"oozie_pid_dir": "/var/run/oozie",
"oozie_admin_users": "{oozie_user}, oozie-admin",
"oozie_user_nofile_limit": "32000",
"content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-{{conf_dir}}}\n export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n{% if java_version < 8 %}\nexport CATALINA_OPTS=\"$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}\"\n{% else %}\nexport CATALINA_OPTS=\"$CATALINA_OPTS -Xmx{{oozie_heapsize}}\"\n{% endif %}\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n{% if sqla_db_used or lib_dir_available %}\nexport LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\nexport JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n{% endif %}",
"oozie_user": "oozie",
"oozie_database": "Existing MySQL Database",
"oozie_tmp_dir": "/var/tmp/oozie",
"oozie_permsize": "256m",
"oozie_hostname": " mn01.vagrant"
},
"spark-env": {
"spark_pid_dir": "/var/run/spark",
"spark_daemon_memory": "1024",
"hive_kerberos_keytab": "{{hive_kerberos_keytab}}",
"spark_user": "spark",
"content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512M\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_CONF_DIR:-{{spark_home}}/conf}\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n#Memory for Master, Worker and history server (default: 1024MB)\nexport SPARK_DAEMON_MEMORY={{spark_daemon_memory}}m\n\nif [ -d \"/etc/tez/conf/\" ]; then\n export TEZ_CONF_DIR=/etc/tez/conf\nelse\n export TEZ_CONF_DIR=\nfi",
"spark_thrift_cmd_opts": "",
"spark_log_dir": "/var/log/spark",
"spark_group": "spark",
"hive_kerberos_principal": "{{hive_kerberos_principal}}"
},
"hdfs-site": {
"dfs.namenode.checkpoint.period": "21600",
"dfs.namenode.avoid.write.stale.datanode": "true",
"dfs.permissions.superusergroup": "hdfs",
"dfs.namenode.startup.delay.block.deletion.sec": "3600",
"dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@HADOOP.TEST",
"dfs.datanode.kerberos.principal": "dn/_HOST@HADOOP.TEST",
"dfs.heartbeat.interval": "3",
"dfs.content-summary.limit": "5000",
"dfs.support.append": "true",
"dfs.datanode.address": "0.0.0.0:1019",
"dfs.cluster.administrators": "hdfs",
"dfs.namenode.audit.log.async": "true",
"dfs.datanode.balance.bandwidthPerSec": "6250000",
"dfs.namenode.safemode.threshold-pct": "1.000",
"dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
"dfs.namenode.rpc-address": "mn01.vagrant:8020",
"dfs.permissions.enabled": "true",
"dfs.namenode.kerberos.principal": "nn/_HOST@HADOOP.TEST",
"dfs.client.read.shortcircuit": "true",
"dfs.https.port": "50470",
"dfs.namenode.https-address": "mn01.vagrant:50470",
"nfs.file.dump.dir": "/tmp/.hdfs-nfs",
"dfs.blocksize": "134217728",
"dfs.blockreport.initialDelay": "120",
"dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
"dfs.namenode.fslock.fair": "false",
"dfs.datanode.max.transfer.threads": "16384",
"dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@HADOOP.TEST",
"dfs.replication": "3",
"dfs.namenode.handler.count": "100",
"dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
"fs.permissions.umask-mode": "022",
"dfs.namenode.stale.datanode.interval": "30000",
"dfs.datanode.ipc.address": "0.0.0.0:8010",
"dfs.datanode.failed.volumes.tolerated": "0",
"dfs.datanode.data.dir": "/hadoop/hdfs/data",
"dfs.namenode.http-address": "mn01.vagrant:50070",
"dfs.webhdfs.enabled": "true",
"dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding",
"dfs.namenode.accesstime.precision": "0",
"dfs.namenode.write.stale.datanode.ratio": "1.0f",
"dfs.namenode.checkpoint.txns": "1000000",
"dfs.datanode.https.address": "0.0.0.0:50475",
"dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"dfs.namenode.secondary.http-address": "mn01.vagrant:50090",
"nfs.exports.allowed.hosts": "* rw",
"dfs.namenode.inode.attributes.provider.class": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
"dfs.datanode.http.address": "0.0.0.0:1022",
"dfs.datanode.du.reserved": "4642923520",
"dfs.client.read.shortcircuit.streams.cache.size": "4096",
"dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
"dfs.web.authentication.kerberos.principal": "HTTP/_HOST@HADOOP.TEST",
"dfs.http.policy": "HTTP_ONLY",
"dfs.block.access.token.enable": "true",
"dfs.client.retry.policy.enabled": "false",
"dfs.secondary.namenode.kerberos.principal": "nn/_HOST@HADOOP.TEST",
"dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
"dfs.namenode.name.dir.restore": "true",
"dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
"dfs.journalnode.https-address": "0.0.0.0:8481",
"dfs.journalnode.http-address": "0.0.0.0:8480",
"dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
"dfs.namenode.avoid.read.stale.datanode": "true",
"dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
"dfs.datanode.data.dir.perm": "750",
"dfs.encryption.key.provider.uri": "",
"dfs.replication.max": "50",
"dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
},
"ams-env": {
"ambari_metrics_user": "ams",
"metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor",
"metrics_collector_log_dir": "/var/log/ambari-metrics-collector",
"metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}\n\n# AMS Collector heapsize\nexport AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n\n# HBase normalizer enabled\nexport AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n\n# HBase compaction policy enabled\nexport AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n\n# HBase Tables Initialization check enabled\nexport AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n\n# AMS Collector options\nexport AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native\"\n{% if security_enabled %}\nexport AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}}\"\n{% endif %}\n\n# AMS Collector GC options\nexport AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\nexport AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"",
"metrics_collector_pid_dir": "/var/run/ambari-metrics-collector",
"metrics_collector_heapsize": "512"
},
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
},
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment 
the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
},
"ranger-yarn-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
"xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
"xasecure.audit.destination.solr.urls": "",
"xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/yarn/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/yarn/audit/hdfs/spool",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"xasecure.audit.destination.hdfs": "true",
"xasecure.audit.is.enabled": "true",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"xasecure.audit.destination.solr": "false",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/rm.service.keytab",
"xasecure.audit.provider.summary.enabled": "false",
"xasecure.audit.jaas.Client.option.principal": "rm/_HOST@HADOOP.TEST",
"xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"xasecure.audit.jaas.Client.option.serviceName": "solr"
},
"ranger-hdfs-plugin-properties": {
"hadoop.rpc.protection": "",
"ranger-hdfs-plugin-enabled": "Yes",
"REPOSITORY_CONFIG_USERNAME": "hadoop",
"policy_user": "ambari-qa",
"common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
},
"pig-properties": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig configuration file. All values can be overwritten by command line\n# arguments; for a description of the properties, run\n#\n# pig -h properties\n#\n\n############################################################################\n#\n# == Logging properties\n#\n\n# Location of pig log file. If blank, a file with a timestamped slug\n# ('pig_1399336559369.log') will be generated in the current working directory.\n#\n# pig.logfile=\n# pig.logfile=/tmp/pig-err.log\n\n# Log4j configuration file. Set at runtime with the -4 parameter. The source\n# distribution has a ./conf/log4j.properties.template file you can rename and\n# customize.\n#\n# log4jconf=./conf/log4j.properties\n\n# Verbose Output.\n# * false (default): print only INFO and above to screen\n# * true: Print all log messages to screen\n#\n# verbose=false\n\n# Omit timestamps on log messages. (default: false)\n#\n# brief=false\n\n# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)\n#\n# debug=INFO\n\n# Roll up warnings across tasks, so that when millions of mappers suddenly cry\n# out in error they are partially silenced. (default, recommended: true)\n#\n# aggregate.warning=true\n\n# Should DESCRIBE pretty-print its schema?\n# * false (default): print on a single-line, suitable for pasting back in to your script\n# * true (recommended): prints on multiple lines with indentation, much more readable\n#\n# pig.pretty.print.schema=false\n\n# === Profiling UDFs ===\n\n# Turn on UDF timers? This will cause two counters to be\n# tracked for every UDF and LoadFunc in your script: approx_microsecs measures\n# approximate time spent inside a UDF approx_invocations reports the approximate\n# number of times the UDF was invoked.\n#\n# * false (default): do not record timing information of UDFs.\n# * true: report UDF performance. Uses more counters, but gives more insight\n# into script operation\n#\n# pig.udf.profile=false\n\n# Specify frequency of profiling (default: every 100th).\n# pig.udf.profile.frequency=100\n\n############################################################################\n#\n# == Site-specific Properties\n#\n\n# Execution Mode. Local mode is much faster, but only suitable for small amounts\n# of data. Local mode interprets paths on the local file system; Mapreduce mode\n# on the HDFS. Read more under 'Execution Modes' within the Getting Started\n# documentation.\n#\n# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files\n# * local: use local mode\n# * tez: use Tez on Hadoop cluster\n# * tez_local: use Tez local mode\n#\n# exectype=mapreduce\n\n# Bootstrap file with default statements to execute in every Pig job, similar to\n# .bashrc. 
If blank, uses the file '.pigbootup' from your home directory; If a\n# value is supplied, that file is NOT loaded. This does not do tilde expansion\n# -- you must supply the full path to the file.\n#\n# pig.load.default.statements=\n# pig.load.default.statements=/home/bob/.pigrc\n\n# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If\n# false, jobs that can proceed independently will do so unless a parent stage\n# fails. If true, the failure of any stage in the script kills all jobs.\n#\n# stop.on.failure=false\n\n# File containing the pig script to run. Rarely set in the properties file.\n# Commandline: -f\n#\n# file=\n\n# Jarfile to load, colon separated. Rarely used.\n#\n# jar=\n\n# Register additional .jar files to use with your Pig script.\n# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):\n#\n# pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar\n#\n# pig.additional.jars=<colon separated list of jars with optional wildcards>\n# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar\n\n# Specify potential packages to which a UDF or a group of UDFs belong,\n# eliminating the need to qualify the UDF on every call. See\n# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names\n#\n# Commandline use:\n#\n# pig \\\n# -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \\\n# -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \\\n# happy_job.pig\n#\n# udf.import.list=<colon separated list of imports>\n# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util\n\n#\n# Reuse jars across jobs run by the same user? (default: false) If enabled, jars\n# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most\n# jars change infrequently, this gives a minor speedup.\n#\n# pig.user.cache.enabled=false\n\n# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)\n#\n# pig.user.cache.location=/tmp\n\n# Replication factor for cached jars. If not specified mapred.submit.replication\n# is used, whose default is 10.\n#\n# pig.user.cache.replication=10\n\n# Default UTC offset. (default: the host's current UTC offset) Supply a UTC\n# offset in Java's timezone format: e.g., +08:00.\n#\n# pig.datetime.default.tz=\n\n############################################################################\n#\n# Memory impacting properties\n#\n\n# Amount of memory (as fraction of heap) allocated to bags before a spill is\n# forced. Default is 0.2, meaning 20% of available memory. Note that this memory\n# is shared across all large bags used by the application. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management\n#\n# pig.cachedbag.memusage=0.2\n\n# Don't spill bags smaller than this size (bytes). Default: 5000000, or about\n# 5MB. Usually, the more spilling the longer runtime, so you might want to tune\n# it according to heap size of each task and so forth.\n#\n# pig.spill.size.threshold=5000000\n\n# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus\n# freeing a bunch of ram -- tell the JVM to perform garbage collection. This\n# should help reduce the number of files being spilled, but causes more-frequent\n# garbage collection. 
Default: 40000000 (about 40 MB)\n#\n# pig.spill.gc.activation.size=40000000\n\n# Maximum amount of data to replicate using the distributed cache when doing\n# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing\n# this in a production environment, but carefully.\n#\n# pig.join.replicated.max.bytes=1000000000\n\n# Fraction of heap available for the reducer to perform a skewed join. A low\n# fraction forces Pig to use more reducers, but increases the copying cost. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins\n#\n# pig.skewedjoin.reduce.memusage=0.3\n\n#\n# === SchemaTuple ===\n#\n# The SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to\n# generate a custom Java class to hold records. Otherwise, tuples are loaded as\n# a plain list that is unaware of its contents' schema -- and so each element\n# has to be wrapped as a Java object on its own. This can provide more efficient\n# CPU utilization, serialization, and most of all memory usage.\n#\n# This feature is considered experimental and is off by default. You can\n# selectively enable it for specific operations using pig.schematuple.udf,\n# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join\n#\n\n# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)\n#\n# pig.schematuple=false\n\n# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).\n# pig.schematuple.udf=false\n\n# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc's with\n# known schemas should output SchemaTuples. (default: value of pig.schematuple)\n# pig.schematuple.load=false\n\n# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory\n# saving here is significant. (default: value of pig.schematuple)\n# pig.schematuple.fr_join=false\n\n# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).\n# pig.schematuple.merge_join=false\n\n############################################################################\n#\n# Serialization options\n#\n\n# Omit empty part files from the output? (default: false)\n#\n# * false (default): reducers generates an output file, even if output is empty\n# * true (recommended): do not generate zero-byte part files\n#\n# The default behavior of MapReduce is to generate an empty file for no data, so\n# Pig follows that. But many small files can cause annoying extra map tasks and\n# put load on the HDFS, so consider setting this to 'true'\n#\n# pig.output.lazy=false\n\n#\n# === Tempfile Handling\n#\n\n# EXPERIMENTAL: Storage format for temporary files generated by intermediate\n# stages of Pig jobs. This can provide significant speed increases for certain\n# codecs, as reducing the amount of data transferred to and from disk can more\n# than make up for the cost of compression/compression. 
Recommend that you set\n# up LZO compression in Hadoop and specify tfile storage.\n#\n# Compress temporary files?\n# * false (default): do not compress\n# * true (recommended): compress temporary files.\n#\n# pig.tmpfilecompression=false\n# pig.tmpfilecompression=true\n\n# Tempfile storage container type.\n#\n# * tfile (default, recommended): more efficient, but only supports supports gz(gzip) and lzo compression.\n# https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf\n# * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression\n#\n# pig.tmpfilecompression.storage=tfile\n\n# Codec types for intermediate job files. tfile supports gz(gzip) and lzo;\n# seqfile support gz(gzip), lzo, snappy, bzip2\n#\n# * lzo (recommended with caveats): moderate compression, low cpu burden;\n# typically leads to a noticeable speedup. Best default choice, but you must\n# set up LZO independently due to license incompatibility\n# * snappy: moderate compression, low cpu burden; typically leads to a noticeable speedup..\n# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.\n# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.\n#\n# pig.tmpfilecompression.codec=gzip\n\n#\n# === Split Combining\n#\n\n#\n# Should pig try to combine small files for fewer map tasks? This improves the\n# efficiency of jobs with many small input files, reduces the overhead on the\n# jobtracker, and reduces the number of output files a map-only job\n# produces. However, it only works with certain loaders and increases non-local\n# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files\n#\n# * false (default, recommended): _do_ combine files\n# * true: do not combine files\n#\n# pig.noSplitCombination=false\n\n#\n# Size, in bytes, of data to be processed by a single map. Smaller files are\n# combined untill this size is reached. If unset, defaults to the file system's\n# default block size.\n#\n# pig.maxCombinedSplitSize=\n\n# ###########################################################################\n#\n# Execution options\n#\n\n# Should pig omit combiners? (default, recommended: false -- meaning pig _will_\n# use combiners)\n#\n# When combiners work well, they eliminate a significant amount of\n# data. However, if they do not eliminate much data -- say, a DISTINCT operation\n# that only eliminates 5% of the records -- they add a noticeable overhead to\n# the job. So the recommended default is false (use combiners), selectively\n# disabling them per-job:\n#\n# pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig\n#\n# pig.exec.nocombiner=false\n\n# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?\n# (default: false, 10; recommended: true, 10). In cases where there is a massive\n# reduction of data in the aggregation step, pig can do a first pass of\n# aggregation before the data even leaves the mapper, saving much serialization\n# overhead. It's off by default but can give a major improvement to\n# group-and-aggregate operations. Pig skips partial aggregation unless reduction\n# is better than a factor of minReduction (default: 10). See\n# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation\n#\n# pig.exec.mapPartAgg=false\n# pig.exec.mapPartAgg.minReduction=10\n\n#\n# === Control how many reducers are used.\n#\n\n# Estimate number of reducers naively using a fixed amount of data per\n# reducer. 
Optimally, you have both fewer reducers than available reduce slots,\n# and reducers that are neither getting too little data (less than a half-GB or\n# so) nor too much data (more than 2-3 times the reducer child process max heap\n# size). The default of 1000000000 (about 1GB) is probably low for a production\n# cluster -- however it's much worse to set this too high (reducers spill many\n# times over in group-sort) than too low (delay waiting for reduce slots).\n#\n# pig.exec.reducers.bytes.per.reducer=1000000000\n\n#\n# Don't ever use more than this many reducers. (default: 999)\n#\n# pig.exec.reducers.max=999\n\n#\n# === Local mode for small jobs\n#\n\n# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data\n# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers\n# are run in local mode, which is much faster. Note that file paths are still\n# interpreted as pig.exectype implies.\n#\n# * true (recommended): allow local mode for small jobs, which is much faster.\n# * false (default): always use pig.exectype.\n#\n# pig.auto.local.enabled=false\n\n#\n# Definition of a small job for the pig.auto.local.enabled feature. Only jobs\n# with less than this may bytes are candidates to run locally (default:\n# 100000000 bytes, about 1GB)\n#\n# pig.auto.local.input.maxbytes=100000000\n\n############################################################################\n#\n# Security Features\n#\n\n# Comma-delimited list of commands/operators that are disallowed. This security\n# feature can be used by administrators to block use of certain commands by\n# users.\n#\n# * <blank> (default): all commands and operators are allowed.\n# * fs,set (for example): block all filesystem commands and config changes from pig scripts.\n#\n# pig.blacklist=\n# pig.blacklist=fs,set\n\n# Comma-delimited list of the only commands/operators that are allowed. This\n# security feature can be used by administrators to block use of certain\n# commands by users.\n#\n# * <blank> (default): all commands and operators not on the pig.blacklist are allowed.\n# * load,store,filter,group: only LOAD, STORE, FILTER, GROUP\n# from pig scripts. All other commands and operators will fail.\n#\n# pig.whitelist=\n# pig.whitelist=load,store,filter,group\n\n#####################################################################\n#\n# Advanced Site-specific Customizations\n#\n\n# Remove intermediate output files?\n#\n# * true (default, recommended): remove the files\n# * false: do NOT remove the files. You must clean them up yourself.\n#\n# Keeping them is useful for advanced debugging, but can be dangerous -- you\n# must clean them up yourself. Inspect the intermediate outputs with\n#\n# LOAD '/path/to/tmp/file' USING org.apache.pig.impl.io.TFileStorage();\n#\n# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)\n#\n# pig.delete.temp.files=true\n\n# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig's\n# progress into your visibility stack. To use a PPNL, supply the fully qualified\n# class name of a PPNL implementation. Note that only one PPNL can be set up, so\n# if you need several, write a PPNL that will chain them.\n#\n# See https://github.com/twitter/ambrose for a pretty awesome one of these\n#\n# pig.notification.listener=<fully qualified class name of a PPNL implementation>\n\n# String argument to pass to your PPNL constructor (optional). Only a single\n# string value is allowed. 
(default none)\n#\n# pig.notification.listener.arg=<somevalue>\n\n# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.\n# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)\n#\n# If you don't know how or why to write a PigReducerEstimator, you're unlikely\n# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is\n# used, but you can specify anything implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator\n#\n# pig.exec.reducer.estimator=<fully qualified class name of a PigReducerEstimator implementation>\n\n# Optional String argument to pass to your PigReducerEstimator. (default: none;\n# a single String argument is allowed).\n#\n# pig.exec.reducer.estimator.arg=<somevalue>\n\n# Class invoked to report the size of reducers output. By default, the reducers'\n# output is computed as the total size of output files. But not every storage is\n# file-based, and so this logic can be replaced by implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader\n# If you need to register more than one reader, you can register them as a comma\n# separated list. Every reader implements a boolean supports(POStore sto) method.\n# When there are more than one reader, they are consulted in order, and the\n# first one whose supports() method returns true will be used.\n#\n# pig.stats.output.size.reader=<fully qualified class name of a PigStatsOutputSizeReader implementation>\n# pig.stats.output.size.reader.unsupported=<comma separated list of StoreFuncs that are not supported by this reader>\n\n# By default, Pig retrieves TaskReports for every launched task to compute\n# various job statistics. But this can cause OOM if the number of tasks is\n# large. In such case, you can disable it by setting this property to true.\n# pig.stats.notaskreport=false\n\n#\n# Override hadoop configs programatically\n#\n# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)\n# to be present on the classpath. There are cases when these configs are\n# needed to be passed programatically, such as while using the PigServer API.\n# In such cases, you can override hadoop configs by setting the property\n# \"pig.use.overriden.hadoop.configs\".\n#\n# When this property is set to true, Pig ignores looking for hadoop configs\n# in the classpath and instead picks it up from Properties/Configuration\n# object passed to it.\n#\n# pig.use.overriden.hadoop.configs=false\n\n# Implied LoadFunc for the LOAD operation when no USING clause is\n# present. Supply the fully qualified class name of a LoadFunc\n# implementation. Note: setting this means you will have to modify most code\n# brought in from elsewhere on the web, as people generally omit the USING\n# clause for TSV files.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc\n# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead\n#\n# pig.default.load.func=<fully qualified class name of a LoadFunc implementation>\n\n# The implied StoreFunc for STORE operations with no USING clause. 
Supply the\n# fully qualified class name of a StoreFunc implementation.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.\n# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead\n#\n# pig.default.store.func=<fully qualified class name of a StoreFunc implementation>\n\n# Recover jobs when the application master is restarted? (default: false). This\n# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.\n#\n# pig.output.committer.recovery.support=true\n\n# Should scripts check to prevent multiple stores writing to the same location?\n# (default: false) When set to true, stops the execution of script right away.\n#\npig.location.check.strict=false\n\n# In addition to the fs-style commands (rm, ls, etc) Pig can now execute\n# SQL-style DDL commands, eg \"sql create table pig_test(name string, age int)\".\n# The only implemented backend is hcat, and luckily that's also the default.\n#\n# pig.sql.type=hcat\n\n# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)\n#\nhcat.bin=/usr/local/hcat/bin/hcat\n\n###########################################################################\n#\n# Overrides for extreme environments\n#\n# (Most people won't have to adjust these parameters)\n#\n\n\n# Limit the pig script length placed in the jobconf xml. (default:10240)\n# Extremely long queries can waste space in the JobConf; since its contents are\n# only advisory, the default is fine unless you are retaining it for forensics.\n#\n# pig.script.max.size=10240\n\n# Disable use of counters by Pig. Note that the word 'counter' is singular here.\n#\n# * false (default, recommended): do NOT disable counters.\n# * true: disable counters. Set this to true only when your Pig job will\n# otherwise die because of using more counters than hadoop configured limit\n#\n# pig.disable.counter=true\n\n# Sample size (per-mapper, in number of rows) the ORDER..BY operation's\n# RandomSampleLoader uses to estimate how your data should be\n# partitioned. (default, recommended: 100 rows per task) Increase this if you\n# have exceptionally large input splits and are unhappy with the reducer skew.\n#\n# pig.random.sampler.sample.size=100\n\n# Process an entire script at once, reducing the amount of work and number of\n# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution\n#\n# MultiQuery optimization is very useful, and so the recommended default is\n# true. You may find a that a script fails to compile under MultiQuery. If so,\n# disable it at runtime:\n#\n# pig -no_multiquery script_that_makes_pig_sad.pig\n#\n# opt.multiquery=true\n\n# For small queries, fetch data directly from the HDFS. (default, recommended:\n# true). If you want to force Pig to launch a MR job, for example when you're\n# testing a live cluster, disable with the -N option. See PIG-3642.\n#\n# opt.fetch=true\n\n# Enable auto/grace parallelism in tez. These should be used by default unless\n# you encounter some bug in automatic parallelism. If pig.tez.auto.parallelism\n# to false, use 1 as default parallelism\npig.tez.auto.parallelism=true\npig.tez.grace.parallelism=true\n\n###########################################################################\n#\n# Streaming properties\n#\n\n# Define what properties will be set in the streaming environment. 
Just set this\n# property to a comma-delimited list of properties to set, and those properties\n# will be set in the environment.\n#\n# pig.streaming.environment=<comma-delimited list of propertes>\n\n# Specify a comma-delimited list of local files to ship to distributed cache for\n# streaming job.\n#\n# pig.streaming.ship.files=<comma-delimited list of local files>\n\n# Specify a comma-delimited list of remote files to cache on distributed cache\n# for streaming job.\n#\n# pig.streaming.cache.files=<comma-delimited list of remote files>\n\n# Specify the python command to be used for python streaming udf. By default,\n# python is used, but you can overwrite it with a non-default version such as\n# python2.7.\n#\n# pig.streaming.udf.python.command=python"
},
"oozie-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time\n# XLogService sets its value to '${oozie.home}/logs'\n\n# The appender that Oozie uses must be named 'oozie' (i.e. log4j.appender.oozie)\n\n# Using the RollingFileAppender with the OozieRollingPolicy will roll the log file every hour and retain up to MaxHistory number of\n# log files. If FileNamePattern ends with \".gz\" it will create gzip files.\nlog4j.appender.oozie=org.apache.log4j.rolling.RollingFileAppender\nlog4j.appender.oozie.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy\nlog4j.appender.oozie.File=${oozie.log.dir}/oozie.log\nlog4j.appender.oozie.Append=true\nlog4j.appender.oozie.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n\n# The FileNamePattern must end with \"-%d{yyyy-MM-dd-HH}.gz\" or \"-%d{yyyy-MM-dd-HH}\" and also start with the\n# value of log4j.appender.oozie.File\nlog4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd-HH}\n# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable\nlog4j.appender.oozie.RollingPolicy.MaxHistory=720\n\n\n\nlog4j.appender.oozieError=org.apache.log4j.rolling.RollingFileAppender\nlog4j.appender.oozieError.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy\nlog4j.appender.oozieError.File=${oozie.log.dir}/oozie-error.log\nlog4j.appender.oozieError.Append=true\nlog4j.appender.oozieError.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieError.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n\n# The FileNamePattern must end with \"-%d{yyyy-MM-dd-HH}.gz\" or \"-%d{yyyy-MM-dd-HH}\" and also start with the\n# value of log4j.appender.oozieError.File\nlog4j.appender.oozieError.RollingPolicy.FileNamePattern=${log4j.appender.oozieError.File}-%d{yyyy-MM-dd-HH}\n# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable\nlog4j.appender.oozieError.RollingPolicy.MaxHistory=720\nlog4j.appender.oozieError.filter.1 = org.apache.log4j.varia.LevelMatchFilter\nlog4j.appender.oozieError.filter.1.levelToMatch = WARN\nlog4j.appender.oozieError.filter.2 = org.apache.log4j.varia.LevelMatchFilter\nlog4j.appender.oozieError.filter.2.levelToMatch = ERROR\nlog4j.appender.oozieError.filter.3 = org.apache.log4j.varia.LevelMatchFilter\nlog4j.appender.oozieError.filter.3.levelToMatch = FATAL\nlog4j.appender.oozieError.filter.4 = org.apache.log4j.varia.DenyAllFilter\n\n\n\n# Uncomment the below two lines to use the DailyRollingFileAppender instead\n# The 
DatePattern must end with either \"dd\" or \"HH\"\n#log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender\n#log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH\n\nlog4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieops.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log\nlog4j.appender.oozieops.Append=true\nlog4j.appender.oozieops.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log\nlog4j.appender.oozieinstrumentation.Append=true\nlog4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log\nlog4j.appender.oozieaudit.Append=true\nlog4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.openjpa.DatePattern='.'yyyy-MM-dd\nlog4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log\nlog4j.appender.openjpa.Append=true\nlog4j.appender.openjpa.layout=org.apache.log4j.PatternLayout\nlog4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.logger.openjpa=INFO, openjpa\nlog4j.logger.oozieops=INFO, oozieops\nlog4j.logger.oozieinstrumentation=ALL, oozieinstrumentation\nlog4j.logger.oozieaudit=ALL, oozieaudit\nlog4j.logger.org.apache.oozie=INFO, oozie, oozieError\nlog4j.logger.org.apache.hadoop=WARN, oozie\nlog4j.logger.org.mortbay=WARN, oozie\nlog4j.logger.org.hsqldb=WARN, oozie\nlog4j.logger.org.apache.hadoop.security.authentication.server=WARN, oozie"
},
"hadoop-env": {
"proxyuser_group": "users",
"hdfs_user_nproc_limit": "65536",
"namenode_opt_permsize": "128m",
"hdfs_tmp_dir": "/tmp",
"namenode_heapsize": "2048m",
"hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else 
%}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. 
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n {% if is_datanode_max_locked_memory_set %}\n ulimit -l {{datanode_max_locked_memory}}\n {% endif %}\n ulimit -n {{hdfs_user_nofile_limit}}\nfi",
"hdfs_user_nofile_limit": "128000",
"keyserver_port": "",
"hadoop_root_logger": "INFO,RFA",
"namenode_opt_maxnewsize": "256m",
"hdfs_log_dir_prefix": "/var/log/hadoop",
"keyserver_host": " ",
"nfsgateway_heapsize": "1024",
"dtnode_heapsize": "1024m",
"namenode_opt_maxpermsize": "256m",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "256m",
"namenode_backup_dir": "/tmp/upgrades",
"hadoop_heapsize": "1024",
"hadoop_pid_dir_prefix": "/var/run/hadoop",
"hdfs_principal_name": "hdfs-examplecluster@HADOOP.TEST"
},
"tez-interactive-site": {
"tez.runtime.pipelined.sorter.lazy-allocate.memory": "true",
"tez.lib.uris": "/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz",
"tez.runtime.pipelined-shuffle.enabled": "false",
"tez.dag.recovery.enabled": "false",
"tez.grouping.node.local.only": "true",
"tez.runtime.shuffle.memory.limit.percent": "0.25",
"tez.runtime.shuffle.fetch.buffer.percent": "0.6",
"tez.runtime.report.partition.stats": "true",
"tez.runtime.shuffle.fetch.verify-disk-checksum": "false",
"tez.session.am.dag.submit.timeout.secs": "3600",
"tez.am.resource.memory.mb": "1536"
},
"livy-spark-blacklist": {
"content": "\n #\n # Configuration override / blacklist. Defines a list of properties that users are not allowed\n # to override when starting Spark sessions.\n #\n # This file takes a list of property names (one per line). Empty lines and lines starting with \"#\"\n # are ignored.\n #"
},
"admin-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_appender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
},
"hiveserver2-site": {
"hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
"hive.security.authorization.manager": "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory",
"hive.metastore.metrics.enabled": "true",
"hive.security.authorization.enabled": "true",
"hive.service.metrics.file.location": "/var/log/hive/hiveserver2-report.json",
"hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2",
"hive.service.metrics.hadoop2.component": "hiveserver2",
"hive.conf.restricted.list": "hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager"
},
"ranger-hbase-security": {
"xasecure.hbase.update.xapolicies.on.grant.revoke": "true",
"ranger.plugin.hbase.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
"ranger.plugin.hbase.service.name": "{{repo_name}}",
"ranger.plugin.hbase.policy.rest.ssl.config.file": "/etc/hbase/conf/ranger-policymgr-ssl.xml",
"ranger.plugin.hbase.policy.pollIntervalMs": "30000",
"ranger.plugin.hbase.policy.rest.url": "{{policymgr_mgr_url}}",
"ranger.plugin.hbase.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache"
},
"ssl-client": {
"ssl.client.truststore.reload.interval": "10000",
"ssl.client.keystore.password": "bigdata",
"ssl.client.truststore.type": "jks",
"ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
"ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
"ssl.client.truststore.password": "bigdata",
"ssl.client.keystore.type": "jks"
},
"falcon-startup.properties": {
"*.falcon.security.authorization.superusergroup": "falcon",
"*.oozie.process.workflow.builder": "org.apache.falcon.workflow.OozieProcessWorkflowBuilder",
"*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider",
"*.falcon.enableTLS": "false",
"*.falcon.http.authentication.token.validity": "36000",
"*.internal.queue.size": "1000",
"*.ProcessInstanceManager.impl": "org.apache.falcon.resource.InstanceManager",
"*.falcon.graph.preserve.history": "false",
"*.falcon.http.authentication.simple.anonymous.allowed": "true",
"*.falcon.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"*.falcon.feed.lifecycle.policies": "org.apache.falcon.lifecycle.retention.AgeBasedDelete",
"*.broker.url": "tcp://mn01.vagrant:61616",
"*.falcon.feed.lifecycle.policy.builders": "org.apache.falcon.lifecycle.engine.oozie.retention.AgeBasedDeleteBuilder",
"*.system.lib.location": "${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib",
"*.falcon.http.authentication.blacklisted.users": "",
"*.falcon.graph.serialize.path": "/hadoop/falcon/data/lineage",
"*.entity.topic": "FALCON.ENTITY.TOPIC",
"*.domain": "${falcon.app.type}",
"*.dfs.namenode.kerberos.principal": "nn/_HOST@HADOOP.TEST",
"*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM",
"*.falcon.graph.blueprints.graph": "com.thinkaurelius.titan.core.TitanFactory",
"*.falcon.http.authentication.kerberos.principal": "HTTP/_HOST@HADOOP.TEST",
"*.ConfigSyncService.impl": "org.apache.falcon.resource.ConfigSyncService",
"*.workflow.engine.impl": "org.apache.falcon.workflow.engine.OozieWorkflowEngine",
"*.falcon.http.authentication.signature.secret": "falcon",
"*.retry.recorder.path": "${falcon.log.dir}/retry",
"*.broker.ttlInMins": "4320",
"*.falcon.authentication.type": "kerberos",
"*.catalog.service.impl": "org.apache.falcon.catalog.HiveCatalogService",
"*.falcon.graph.storage.directory": "/hadoop/falcon/data/lineage/graphdb",
"*.extension.store.uri": "/apps/falcon/extensions",
"*.falcon.http.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-examplecluster@HADOOP.TEST)s/.*/ambari-qa/\\\nRULE:[1:$1@$0](hbase-examplecluster@HADOOP.TEST)s/.*/hbase/\\\nRULE:[1:$1@$0](hdfs-examplecluster@HADOOP.TEST)s/.*/hdfs/\\\nRULE:[1:$1@$0](spark-examplecluster@HADOOP.TEST)s/.*/spark/\\\nRULE:[1:$1@$0](.*@HADOOP.TEST)s/@.*//\\\nRULE:[2:$1@$0](amshbase@HADOOP.TEST)s/.*/ams/\\\nRULE:[2:$1@$0](amszk@HADOOP.TEST)s/.*/ams/\\\nRULE:[2:$1@$0](dn@HADOOP.TEST)s/.*/hdfs/\\\nRULE:[2:$1@$0](falcon@HADOOP.TEST)s/.*/falcon/\\\nRULE:[2:$1@$0](hbase@HADOOP.TEST)s/.*/hbase/\\\nRULE:[2:$1@$0](hive@HADOOP.TEST)s/.*/hive/\\\nRULE:[2:$1@$0](jhs@HADOOP.TEST)s/.*/mapred/\\\nRULE:[2:$1@$0](nm@HADOOP.TEST)s/.*/yarn/\\\nRULE:[2:$1@$0](nn@HADOOP.TEST)s/.*/hdfs/\\\nRULE:[2:$1@$0](oozie@HADOOP.TEST)s/.*/oozie/\\\nRULE:[2:$1@$0](rangeradmin@HADOOP.TEST)s/.*/ranger/\\\nRULE:[2:$1@$0](rangerusersync@HADOOP.TEST)s/.*/rangerusersync/\\\nRULE:[2:$1@$0](rm@HADOOP.TEST)s/.*/yarn/\\\nRULE:[2:$1@$0](yarn@HADOOP.TEST)s/.*/yarn/\\\nDEFAULT",
"*.application.services": "org.apache.falcon.security.AuthenticationInitializationService,\\\n org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n org.apache.falcon.service.ProcessSubscriberService,\\\n org.apache.falcon.extensions.ExtensionService,\\\n org.apache.falcon.service.LifecyclePolicyMap,\\\n org.apache.falcon.entity.store.ConfigurationStore,\\\n org.apache.falcon.rerun.service.RetryService,\\\n org.apache.falcon.rerun.service.LateRunService,\\\n org.apache.falcon.service.LogCleanupService,\\\n org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}",
"*.falcon.service.authentication.kerberos.principal": "falcon/_HOST@HADOOP.TEST",
"*.SchedulableEntityManager.impl": "org.apache.falcon.resource.SchedulableEntityManager",
"*.max.retry.failure.count": "1",
"*.hive.shared.libs": "hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\\\nhive-hcatalog-core,hive-jdbc,hive-webhcat-java-client",
"*.falcon.http.authentication.type": "kerberos",
"*.falcon.security.authorization.admin.users": "falcon,ambari-qa",
"*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
"*.oozie.feed.workflow.builder": "org.apache.falcon.workflow.OozieFeedWorkflowBuilder",
"prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore",
"*.shared.libs": "activemq-all,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el",
"*.lifecycle.engine.impl": "org.apache.falcon.lifecycle.engine.oozie.OoziePolicyBuilderFactory",
"*.falcon.cleanup.service.frequency": "days(1)",
"*.falcon.graph.storage.backend": "berkeleyje",
"prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n org.apache.falcon.entity.ColoClusterRelation,\\\n org.apache.falcon.group.FeedGroupMap",
"*.config.store.uri": "file:///hadoop/falcon/store",
"*.falcon.security.authorization.enabled": "false",
"*.falcon.security.authorization.admin.groups": "falcon",
"*.broker.impl.class": "org.apache.activemq.ActiveMQConnectionFactory",
"*.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n org.apache.falcon.entity.ColoClusterRelation,\\\n org.apache.falcon.group.FeedGroupMap,\\\n org.apache.falcon.service.SharedLibraryHostingService",
"*.falcon.service.authentication.kerberos.keytab": "/etc/security/keytabs/falcon.service.keytab"
},
"hive-log4j2": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = DRFA\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = hive.log\n\n# list of all appenders\nappenders = console, DRFA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# daily rolling file appender\nappender.DRFA.type = RollingFile\nappender.DRFA.name = DRFA\nappender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\n# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI session\nappender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}.gz\nappender.DRFA.layout.type = PatternLayout\nappender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\nappender.DRFA.policies.type = Policies\nappender.DRFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.DRFA.policies.time.interval = 1\nappender.DRFA.policies.time.modulate = true\nappender.DRFA.strategy.type = DefaultRolloverStrategy\nappender.DRFA.strategy.max = 30\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}"
},
"tagsync-application-properties": {
"atlas.kafka.entities.group.id": "ranger_entities_consumer",
"atlas.kafka.zookeeper.connect": "localhost:2181",
"atlas.kafka.bootstrap.servers": "localhost:6667"
},
"ranger-hive-policymgr-ssl": {
"xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks",
"xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"spark-javaopts-properties": {
"content": " "
},
"webhcat-env": {
"content": "\n# The file containing the running pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}"
},
"ams-ssl-server": {
"ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
"ssl.server.keystore.keypassword": "bigdata",
"ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
"ssl.server.keystore.password": "bigdata",
"ssl.server.truststore.password": "bigdata",
"ssl.server.truststore.type": "jks",
"ssl.server.keystore.type": "jks",
"ssl.server.truststore.reload.interval": "10000"
},
"tez-site": {
"tez.task.get-task.sleep.interval-ms.max": "200",
"tez.task.max-events-per-heartbeat": "500",
"tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.runtime.compress": "true",
"tez.runtime.io.sort.mb": "135",
"tez.runtime.shuffle.fetch.buffer.percent": "0.6",
"tez.runtime.convert.user-payload.to.history-text": "false",
"tez.generate.debug.artifacts": "false",
"tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__",
"tez.am.view-acls": "",
"tez.am.log.level": "INFO",
"tez.counters.max.groups": "3000",
"tez.counters.max": "10000",
"tez.shuffle-vertex-manager.max-src-fraction": "0.4",
"tez.runtime.unordered.output.buffer.size-mb": "38",
"tez.queue.name": "default",
"tez.task.resource.memory.mb": "512",
"tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService",
"tez.runtime.optimize.local.fetch": "true",
"tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.task.am.heartbeat.counter.interval-ms.max": "4000",
"tez.am.max.app.attempts": "2",
"tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"tez.am.container.idle.release-timeout-max.millis": "20000",
"tez.use.cluster.hadoop-libs": "false",
"tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"tez.am.container.idle.release-timeout-min.millis": "10000",
"tez.grouping.min-size": "16777216",
"tez.runtime.sorter.class": "PIPELINED",
"tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
"tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"tez.am.container.reuse.enabled": "true",
"tez.session.am.dag.submit.timeout.secs": "600",
"tez.grouping.split-waves": "1.7",
"tez.grouping.max-size": "1073741824",
"tez.session.client.timeout.secs": "-1",
"tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
"tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
"tez.staging-dir": "/tmp/${user.name}/staging",
"tez.am.am-rm.heartbeat.interval-ms.max": "250",
"tez.runtime.shuffle.memory.limit.percent": "0.25",
"tez.task.generate.counters.per.io": "true",
"tez.am.maxtaskfailures.per.node": "10",
"tez.am.container.reuse.non-local-fallback.enabled": "false",
"tez.am.container.reuse.rack-fallback.enabled": "true",
"tez.tez-ui.history-url.base": "http://mn01.vagrant:8080/#/main/views/TEZ/0.7.0.2.5.3.0-136/TEZ_CLUSTER_INSTANCE",
"tez.runtime.pipelined.sorter.sort.threads": "2",
"tez.am.container.reuse.locality.delay-allocation-millis": "250",
"tez.shuffle-vertex-manager.min-src-fraction": "0.2",
"tez.am.resource.memory.mb": "1024"
},
"spark-thrift-sparkconf": {
"spark.eventLog.dir": "{{spark_history_dir}}",
"spark.yarn.principal": "{{hive_kerberos_principal}}",
"spark.history.fs.logDirectory": "{{spark_history_dir}}",
"spark.scheduler.mode": "FAIR",
"spark.dynamicAllocation.initialExecutors": "0",
"spark.dynamicAllocation.enabled": "true",
"spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml",
"spark.dynamicAllocation.maxExecutors": "10",
"spark.shuffle.service.enabled": "true",
"spark.dynamicAllocation.minExecutors": "0",
"spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}",
"spark.yarn.queue": "default",
"spark.hadoop.cacheConf": "false",
"spark.master": "{{spark_thrift_master}}",
"spark.executor.memory": "1g",
"spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}",
"spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider",
"spark.yarn.keytab": "{{hive_kerberos_keytab}}",
"spark.yarn.am.memory": "512m",
"spark.eventLog.enabled": "true"
},
"ranger-tagsync-site": {
"ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
"ranger.tagsync.source.atlasrest.username": "admin",
"ranger.tagsync.logdir": "/var/log/ranger/tagsync",
"ranger.tagsync.source.atlasrest.download.interval.millis": "60000",
"ranger.tagsync.source.atlasrest.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/atlasuser.jceks",
"ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
"ranger.tagsync.source.file.check.interval.millis": "",
"ranger.tagsync.source.atlasrest.endpoint": "",
"ranger.tagsync.dest.ranger.username": "rangertagsync",
"ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}",
"ranger.tagsync.kerberos.principal": "",
"ranger.tagsync.kerberos.keytab": "",
"ranger.tagsync.source.atlas": "false",
"ranger.tagsync.source.atlasrest": "false",
"ranger.tagsync.source.file": "false",
"ranger.tagsync.source.file.filename": ""
},
"hive-exec-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,FA\nlog4j.category.Datastore=ERROR,FA\nlog4j.category.Datastore.Schema=ERROR,FA\nlog4j.category.JPOX.Datastore=ERROR,FA\nlog4j.category.JPOX.Plugin=ERROR,FA\nlog4j.category.JPOX.MetaData=ERROR,FA\nlog4j.category.JPOX.Query=ERROR,FA\nlog4j.category.JPOX.General=ERROR,FA\nlog4j.category.JPOX.Enhancer=ERROR,FA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA"
},
"hive-interactive-site": {
"hive.tez.input.generate.consistent.splits": "true",
"hive.llap.daemon.service.principal": "hive/_HOST@HADOOP.TEST",
"hive.llap.client.consistent.splits": "true",
"hive.tez.bucket.pruning": "true",
"hive.llap.object.cache.enabled": "true",
"llap.shuffle.connection-keep-alive.enable": "true",
"hive.execution.engine": "tez",
"hive.vectorized.execution.reduce.enabled": "true",
"hive.llap.daemon.service.hosts": "@llap0",
"hive.llap.task.scheduler.locality.delay": "-1",
"hive.server2.thrift.port": "10500",
"hive.server2.thrift.http.port": "10501",
"hive.llap.zk.sm.connectionString": "mn01.vagrant:2181",
"hive.llap.daemon.task.scheduler.enable.preemption": "true",
"hive.llap.io.threadpool.size": "2",
"hive.llap.daemon.vcpus.per.instance": "${hive.llap.daemon.num.executors}",
"hive.optimize.dynamic.partition.hashjoin": "true",
"hive.server2.zookeeper.namespace": "hiveserver2-hive2",
"hive.llap.io.memory.mode": "",
"hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled": "true",
"hive.server2.tez.initialize.default.sessions": "true",
"hive.llap.auto.allow.uber": "false",
"hive.metastore.event.listeners": "",
"hive.server2.tez.default.queues": "default",
"hive.prewarm.enabled": "false",
"hive.metastore.uris": "",
"hive.llap.io.enabled": "true",
"hive.llap.daemon.yarn.container.mb": "341",
"hive.llap.io.memory.size": "0",
"hive.server2.webui.use.ssl": "false",
"hive.vectorized.execution.mapjoin.native.enabled": "true",
"hive.driver.parallel.compilation": "true",
"hive.llap.daemon.num.executors": "1",
"hive.vectorized.execution.mapjoin.minmax.enabled": "true",
"hive.server2.tez.sessions.per.default.queue": "1",
"hive.mapjoin.hybridgrace.hashtable": "false",
"hive.llap.daemon.allow.permanent.fns": "false",
"hive.server2.enable.doAs": "false",
"hive.llap.zk.sm.principal": "hive/_HOST@HADOOP.TEST",
"hive.server2.webui.port": "10502",
"hive.llap.daemon.queue.name": "default",
"hive.exec.orc.split.strategy": "HYBRID",
"hive.tez.exec.print.summary": "true",
"hive.execution.mode": "llap",
"hive.llap.management.rpc.port": "15004",
"hive.llap.io.use.lrfu": "true",
"llap.shuffle.connection-keep-alive.timeout": "60",
"hive.llap.zk.sm.keytab.file": "/etc/security/keytabs/hive.llap.zk.sm.keytab",
"hive.llap.daemon.yarn.shuffle.port": "15551",
"hive.llap.daemon.keytab.file": "/etc/security/keytabs/hive.service.keytab",
"hive.llap.execution.mode": "all",
"hive.llap.daemon.rpc.port": "15001"
},
"ranger-hive-plugin-properties": {
"hadoop.rpc.protection": "",
"ranger-hive-plugin-enabled": "Yes",
"jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
"REPOSITORY_CONFIG_USERNAME": "hive",
"policy_user": "ambari-qa",
"common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hive"
},
"krb5-conf": {
"domains": "",
"manage_krb5_conf": "false",
"content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n default_ccache_name = /tmp/krb5cc_%{uid}\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',') -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n }\n\n{# Append additional realm declarations below #}",
"conf_dir": "/etc"
},
"core-site": {
"hadoop.proxyuser.falcon.hosts": "*",
"net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py",
"hadoop.proxyuser.HTTP.groups": "users",
"hadoop.proxyuser.yarn.groups": "*",
"hadoop.proxyuser.hdfs.groups": "*",
"hadoop.proxyuser.hcat.hosts": " m",
"hadoop.proxyuser.falcon.groups": "*",
"hadoop.proxyuser.hcat.groups": "*",
"fs.trash.interval": "360",
"hadoop.proxyuser.hbase.hosts": "*",
"hadoop.http.authentication.simple.anonymous.allowed": "true",
"io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
"hadoop.proxyuser.root.groups": "*",
"hadoop.proxyuser.HTTP.hosts": "mn01.vagrant",
"hadoop.proxyuser.livy.hosts": "*",
"ipc.client.idlethreshold": "8000",
"io.file.buffer.size": "131072",
"io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
"hadoop.security.authentication": "kerberos",
"hadoop.proxyuser.root.hosts": "mn01.vagrant",
"mapreduce.jobtracker.webinterface.trusted": "false",
"hadoop.proxyuser.hdfs.hosts": "*",
"hadoop.proxyuser.hive.hosts": " m",
"fs.defaultFS": "hdfs://mn01.vagrant:8020",
"hadoop.proxyuser.livy.groups": "*",
"hadoop.proxyuser.oozie.groups": "*",
"ha.failover-controller.active-standby-elector.zk.op.retries": "120",
"hadoop.proxyuser.hive.groups": "*",
"hadoop.security.key.provider.path": "",
"hadoop.proxyuser.hbase.groups": "*",
"hadoop.proxyuser.yarn.hosts": "mn01.vagrant",
"hadoop.security.authorization": "true",
"ipc.server.tcpnodelay": "true",
"ipc.client.connect.max.retries": "50",
"hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-examplecluster@HADOOP.TEST)s/.*/ambari-qa/\nRULE:[1:$1@$0](hbase-examplecluster@HADOOP.TEST)s/.*/hbase/\nRULE:[1:$1@$0](hdfs-examplecluster@HADOOP.TEST)s/.*/hdfs/\nRULE:[1:$1@$0](spark-examplecluster@HADOOP.TEST)s/.*/spark/\nRULE:[1:$1@$0](.*@HADOOP.TEST)s/@.*//\nRULE:[2:$1@$0](amshbase@HADOOP.TEST)s/.*/ams/\nRULE:[2:$1@$0](amszk@HADOOP.TEST)s/.*/ams/\nRULE:[2:$1@$0](dn@HADOOP.TEST)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@HADOOP.TEST)s/.*/falcon/\nRULE:[2:$1@$0](hbase@HADOOP.TEST)s/.*/hbase/\nRULE:[2:$1@$0](hive@HADOOP.TEST)s/.*/hive/\nRULE:[2:$1@$0](jhs@HADOOP.TEST)s/.*/mapred/\nRULE:[2:$1@$0](nm@HADOOP.TEST)s/.*/yarn/\nRULE:[2:$1@$0](nn@HADOOP.TEST)s/.*/hdfs/\nRULE:[2:$1@$0](oozie@HADOOP.TEST)s/.*/oozie/\nRULE:[2:$1@$0](rangeradmin@HADOOP.TEST)s/.*/ranger/\nRULE:[2:$1@$0](rangerusersync@HADOOP.TEST)s/.*/rangerusersync/\nRULE:[2:$1@$0](rm@HADOOP.TEST)s/.*/yarn/\nRULE:[2:$1@$0](yarn@HADOOP.TEST)s/.*/yarn/\nDEFAULT",
"hadoop.proxyuser.oozie.hosts": "*",
"ipc.client.connection.maxidletime": "30000"
},
"hiveserver2-interactive-site": {
"hive.service.metrics.hadoop2.component": "hiveserver2",
"hive.metastore.metrics.enabled": "true",
"hive.service.metrics.file.location": "/var/log/hive/hiveserver2Interactive-report.json",
"hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2",
"hive.async.log.enabled": "false"
},
"capacity-scheduler": {
"yarn.scheduler.capacity.default.minimum-user-limit-percent": "100",
"yarn.scheduler.capacity.root.default.acl_administer_queue": "yarn",
"yarn.scheduler.capacity.root.default.maximum-capacity": "100",
"yarn.scheduler.capacity.root.accessible-node-labels": "*",
"yarn.scheduler.capacity.root.capacity": "100",
"yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
"yarn.scheduler.capacity.maximum-applications": "10000",
"yarn.scheduler.capacity.root.default.user-limit-factor": "1",
"yarn.scheduler.capacity.node-locality-delay": "40",
"yarn.scheduler.capacity.root.default.acl_submit_applications": "yarn",
"yarn.scheduler.capacity.root.default.state": "RUNNING",
"yarn.scheduler.capacity.root.default.capacity": "100",
"yarn.scheduler.capacity.root.acl_administer_queue": "yarn",
"yarn.scheduler.capacity.root.queues": "default",
"yarn.scheduler.capacity.root.acl_administer_jobs": "yarn",
"yarn.scheduler.capacity.root.default.acl_administer_jobs": "yarn",
"yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator"
},
"ranger-kafka-security": {
"ranger.plugin.kafka.policy.pollIntervalMs": "30000",
"ranger.plugin.kafka.service.name": "{{repo_name}}",
"ranger.plugin.kafka.policy.rest.ssl.config.file": "/etc/kafka/conf/ranger-policymgr-ssl.xml",
"ranger.plugin.kafka.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
"ranger.plugin.kafka.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
"ranger.plugin.kafka.policy.rest.url": "{{policymgr_mgr_url}}"
},
"kafka-env": {
"kafka_user_nproc_limit": "65536",
"kafka_principal_name": "kafka/_HOST@HADOOP.TEST",
"content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport PID_DIR={{kafka_pid_dir}}\nexport LOG_DIR={{kafka_log_dir}}\nexport KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}\n# Add kafka sink to classpath and related depenencies\nif [ -e \"/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\" ]; then\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*\nfi\nif [ -f /etc/kafka/conf/kafka-ranger-env.sh ]; then\n. /etc/kafka/conf/kafka-ranger-env.sh\nfi",
"kafka_log_dir": "/var/log/kafka",
"kafka_pid_dir": "/var/run/kafka",
"kafka_user_nofile_limit": "128000",
"is_supported_kafka_ranger": "true",
"kafka_user": "kafka",
"kafka_keytab": "/etc/security/keytabs/kafka.service.keytab"
},
"tagsync-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync.log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
},
"usersync-properties": {},
"ams-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Define some default values that can be overridden by system properties\nams.log.dir=.\nams.log.file=ambari-metrics-collector.log\n\n# Root logger option\nlog4j.rootLogger=INFO,file\n\n# Direct log messages to a log file\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File=${ams.log.dir}/${ams.log.file}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n"
},
"hive-exec-log4j2": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveExecLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = FA\nproperty.hive.query.id = hadoop\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = ${sys:hive.query.id}.log\n\n# list of all appenders\nappenders = console, FA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# simple file appender\nappender.FA.type = File\nappender.FA.name = FA\nappender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\nappender.FA.layout.type = PatternLayout\nappender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}"
},
"zookeeper-env": {
"zk_server_heapsize": "1024m",
"zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab",
"zk_user": "zookeeper",
"zk_log_dir": "/var/log/zookeeper",
"content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
"zk_pid_dir": "/var/run/zookeeper",
"zookeeper_principal_name": "zookeeper/_HOST@HADOOP.TEST"
},
"ams-hbase-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
},
"cluster-env": {
"security_enabled": "true",
"override_uid": "true",
"fetch_nonlocal_groups": "true",
"one_dir_per_partition": "false",
"commands_to_retry": "INSTALL,START",
"repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
"ignore_groupsusers_create": "false",
"alerts_repeat_tolerance": "1",
"smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
"kerberos_domain": "EXAMPLE.COM",
"manage_dirs_on_root": "true",
"recovery_lifetime_max_count": "1024",
"recovery_type": "AUTO_START",
"ignore_bad_mounts": "false",
"recovery_window_in_minutes": "60",
"command_retry_enabled": "true",
"stack_tools": "{\n \"stack_selector\": [\"hdp-select\", \"/usr/bin/hdp-select\", \"hdp-select\"],\n \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}",
"recovery_retry_interval": "5",
"command_retry_max_time_in_sec": "600",
"stack_features": "{\n \"stack_features\": [\n {\n \"name\": \"snappy\",\n \"description\": \"Snappy compressor/decompressor support\",\n \"min_version\": \"2.0.0.0\",\n \"max_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"lzo\",\n \"description\": \"LZO libraries support\",\n \"min_version\": \"2.2.1.0\"\n },\n {\n \"name\": \"express_upgrade\",\n \"description\": \"Express upgrade support\",\n \"min_version\": \"2.1.0.0\"\n },\n {\n \"name\": \"rolling_upgrade\",\n \"description\": \"Rolling upgrade support\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"config_versioning\",\n \"description\": \"Configurable versions support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"datanode_non_root\",\n \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"remove_ranger_hdfs_plugin_env\",\n \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"ranger\",\n \"description\": \"Ranger Service support\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"ranger_tagsync_component\",\n \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"phoenix\",\n \"description\": \"Phoenix Service support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"nfs\",\n \"description\": \"NFS support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"tez_for_spark\",\n \"description\": \"Tez dependency for Spark\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"timeline_state_store\",\n \"description\": \"Yarn application timeline-service supports state store property (AMBARI-11442)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"copy_tarball_to_hdfs\",\n \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"spark_16plus\",\n \"description\": \"Spark 1.6+\",\n \"min_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"spark_thriftserver\",\n \"description\": \"Spark Thrift Server\",\n \"min_version\": \"2.3.2.0\"\n },\n {\n \"name\": \"storm_kerberos\",\n \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"storm_ams\",\n \"description\": \"Storm AMS integration (AMBARI-10710)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"create_kafka_broker_id\",\n \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"kafka_listeners\",\n \"description\": \"Kafka listeners (AMBARI-10984)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"kafka_kerberos\",\n \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"pig_on_tez\",\n \"description\": \"Pig on Tez support (AMBARI-7863)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"ranger_usersync_non_root\",\n \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"ranger_audit_db_support\",\n \"description\": \"Ranger Audit to DB support\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"accumulo_kerberos_user_auth\",\n \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"knox_versioned_data_dir\",\n \"description\": \"Use versioned 
data dir for Knox (AMBARI-13164)\",\n \"min_version\": \"2.3.2.0\"\n },\n {\n \"name\": \"knox_sso_topology\",\n \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n \"min_version\": \"2.3.8.0\"\n },\n {\n \"name\": \"atlas_rolling_upgrade\",\n \"description\": \"Rolling upgrade support for Atlas\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"oozie_admin_user\",\n \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_create_hive_tez_configs\",\n \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_setup_shared_lib\",\n \"description\": \"Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_host_kerberos\",\n \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n \"min_version\": \"2.0.0.0\",\n \"max_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"falcon_extensions\",\n \"description\": \"Falcon Extension\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_metastore_upgrade_schema\",\n \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_server_interactive\",\n \"description\": \"Hive server interactive support (AMBARI-15573)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_webhcat_specific_configs\",\n \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_purge_table\",\n \"description\": \"Hive purge table support (AMBARI-12260)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_server2_kerberized_env\",\n \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n \"min_version\": \"2.2.3.0\",\n \"max_version\": \"2.2.5.0\"\n },\n {\n \"name\": \"hive_env_heapsize\",\n \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"ranger_kms_hsm_support\",\n \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_log4j_support\",\n \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_kerberos_support\",\n \"description\": \"Ranger Kerberos support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_metastore_site_support\",\n \"description\": \"Hive Metastore site support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_usersync_password_jceks\",\n \"description\": \"Saving Ranger Usersync credentials in jceks\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_install_infra_client\",\n \"description\": \"Ambari Infra Service support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"falcon_atlas_support_2_3\",\n \"description\": \"Falcon Atlas integration support for 2.3 stack\",\n \"min_version\": \"2.3.99.0\",\n \"max_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"falcon_atlas_support\",\n \"description\": \"Falcon Atlas integration\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hbase_home_directory\",\n \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"spark_livy\",\n \"description\": \"Livy as slave 
component of spark\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"atlas_ranger_plugin_support\",\n \"description\": \"Atlas Ranger plugin support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"atlas_conf_dir_in_path\",\n \"description\": \"Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon\",\n \"min_version\": \"2.3.0.0\",\n \"max_version\": \"2.4.99.99\"\n },\n {\n \"name\": \"atlas_upgrade_support\",\n \"description\": \"Atlas supports express and rolling upgrades\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"atlas_hook_support\",\n \"description\": \"Atlas support for hooks in Hive, Storm, Falcon, and Sqoop\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_pid_support\",\n \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_kms_pid_support\",\n \"description\": \"Ranger KMS Service support pid generation\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_admin_password_change\",\n \"description\": \"Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"storm_metrics_apache_classes\",\n \"description\": \"Metrics sink for Storm that uses Apache class names\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"spark_java_opts_support\",\n \"description\": \"Allow Spark to generate java-opts file\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"atlas_hbase_setup\",\n \"description\": \"Use script to create Atlas tables in Hbase and set permissions for Atlas user.\",\n \"min_version\": \"2.5.0.0\"\n }\n ]\n}",
"recovery_enabled": "true",
"smokeuser_principal_name": "ambari-qa-examplecluster@HADOOP.TEST",
"recovery_max_count": "6",
"stack_root": "/usr/hdp",
"repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0",
"ambari_principal_name": "ambari-server-examplecluster@HADOOP.TEST",
"user_group": "hadoop",
"managed_hdfs_resource_property_names": "",
"smokeuser": "ambari-qa"
},
"livy-log4j-properties": {
"content": "\n # Set everything to be logged to the console\n log4j.rootCategory=INFO, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n log4j.logger.org.eclipse.jetty=WARN"
},
"mapred-site": {
"mapreduce.jobhistory.address": "mn01.vagrant:10020",
"mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"mapreduce.reduce.input.buffer.percent": "0.0",
"mapreduce.output.fileoutputformat.compress": "false",
"mapreduce.framework.name": "yarn",
"mapreduce.map.speculative": "false",
"mapreduce.reduce.shuffle.merge.percent": "0.66",
"mapreduce.map.sort.spill.percent": "0.7",
"yarn.app.mapreduce.am.resource.mb": "512",
"mapreduce.map.java.opts": "-Xmx1228m",
"mapreduce.reduce.shuffle.parallelcopies": "30",
"mapreduce.cluster.administrators": " hadoop",
"mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs",
"mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
"mapreduce.job.reduce.slowstart.completedmaps": "0.05",
"mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
"mapreduce.output.fileoutputformat.compress.type": "BLOCK",
"mapreduce.reduce.speculative": "false",
"mapreduce.reduce.java.opts": "-Xmx1228m",
"mapreduce.am.max-attempts": "2",
"yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
"mapreduce.reduce.log.level": "INFO",
"mapreduce.jobhistory.principal": "jhs/_HOST@HADOOP.TEST",
"mapreduce.job.emit-timeline-data": "false",
"mapreduce.task.io.sort.mb": "859",
"mapreduce.task.timeout": "300000",
"mapreduce.map.memory.mb": "1536",
"mapreduce.task.io.sort.factor": "100",
"mapreduce.jobhistory.http.policy": "HTTP_ONLY",
"mapreduce.reduce.memory.mb": "1536",
"mapreduce.jobhistory.recovery.enable": "true",
"mapreduce.job.counters.max": "130",
"mapreduce.map.log.level": "INFO",
"mapreduce.shuffle.port": "13562",
"mapreduce.job.queuename": "default",
"mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
"mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"mapreduce.jobhistory.recovery.store.class": "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService",
"mapreduce.map.output.compress": "false",
"yarn.app.mapreduce.am.staging-dir": "/user",
"yarn.app.mapreduce.am.log.level": "INFO",
"mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
"mapreduce.jobhistory.webapp.address": "mn01.vagrant:19888",
"mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab",
"mapreduce.jobhistory.done-dir": "/mr-history/done",
"mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
"mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
"yarn.app.mapreduce.am.command-opts": "-Xmx409m -Dhdp.version=${hdp.version}",
"mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
"mapreduce.jobhistory.bind-host": "0.0.0.0",
"mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@HADOOP.TEST"
},
"webhcat-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Define some default values that can be overridden by system properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = .\nwebhcat.log.file = webhcat.log\n\nlog4j.rootLogger = ${webhcat.root.logger}\n\n# Logging Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard = org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern = .yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging settings\nlog4j.logger.com.sun.jersey = DEBUG\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR\nlog4j.logger.org.apache.hadoop = INFO\nlog4j.logger.org.apache.hadoop.conf = WARN\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty = INFO"
},
"ranger-yarn-plugin-properties": {
"hadoop.rpc.protection": "",
"ranger-yarn-plugin-enabled": "No",
"REPOSITORY_CONFIG_USERNAME": "yarn",
"policy_user": "ambari-qa",
"common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "yarn"
},
"ranger-admin-site": {
"ranger.admin.kerberos.cookie.domain": "{{ranger_host}}",
"ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.kms.service.user.hdfs": "",
"ranger.spnego.kerberos.principal": "*",
"ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}",
"ranger.plugins.hive.serviceuser": "hive",
"ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab",
"ranger.plugins.kms.serviceuser": "kms",
"ranger.service.https.attrib.ssl.enabled": "false",
"ranger.sso.browser.useragent": "Mozilla,chrome",
"ranger.jpa.jdbc.url": "jdbc:mysql://mn01.vagrant:3306/ranger",
"ranger.plugins.hbase.serviceuser": "hbase",
"ranger.plugins.hdfs.serviceuser": "hdfs",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}",
"ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net",
"ranger.plugins.knox.serviceuser": "knox",
"ranger.ldap.base.dn": "dc=example,dc=com",
"ranger.sso.publicKey": "",
"ranger.admin.kerberos.cookie.path": "/",
"ranger.service.https.attrib.clientAuth": "want",
"xasecure.audit.jaas.Client.option.serviceName": "solr",
"ranger.jpa.jdbc.user": "{{ranger_db_user}}",
"ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})",
"ranger.ldap.group.roleattribute": "cn",
"ranger.plugins.kafka.serviceuser": "kafka",
"ranger.admin.kerberos.principal": "rangeradmin/_HOST@HADOOP.TEST",
"ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}",
"ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
"ranger.unixauth.service.hostname": "{{ugsync_host}}",
"ranger.ldap.referral": "ignore",
"ranger.service.http.port": "6080",
"ranger.ldap.user.searchfilter": "(uid={0})",
"ranger.plugins.atlas.serviceuser": "atlas",
"ranger.truststore.password": "changeit",
"ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
"ranger.audit.solr.password": "NONE",
"ranger.audit.solr.zookeepers": "NONE",
"ranger.lookup.kerberos.principal": "rangerlookup/_HOST@HADOOP.TEST",
"xasecure.audit.jaas.Client.option.principal": "rangeradmin/_HOST@HADOOP.TEST",
"ranger.is.solr.kerberised": "{{ranger_is_solr_kerberised}}",
"ranger.service.https.port": "6182",
"ranger.jpa.audit.jdbc.url": "jdbc:mysql://mn01.vagrant:3306/rangeraudit",
"ranger.externalurl": "{{ranger_external_url}}",
"ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
"ranger.kms.service.user.hive": "",
"ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks",
"ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}",
"ranger.service.host": "{{ranger_host}}",
"ranger.service.https.attrib.keystore.keyalias": "rangeradmin",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/rangeradmin.service.keytab",
"ranger.service.https.attrib.keystore.pass": "xasecure",
"ranger.unixauth.remote.login.enabled": "true",
"ranger.jpa.jdbc.credential.alias": "rangeradmin",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}",
"ranger.audit.solr.username": "ranger_solr",
"ranger.sso.enabled": "false",
"ranger.audit.solr.urls": "",
"ranger.ldap.ad.domain": "",
"ranger.plugins.yarn.serviceuser": "yarn",
"ranger.audit.source.type": "solr",
"ranger.ldap.url": "{{ranger_ug_ldap_url}}",
"ranger.authentication.method": "UNIX",
"ranger.service.http.enabled": "true",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}",
"ranger.ldap.ad.referral": "ignore",
"ranger.ldap.ad.base.dn": "dc=example,dc=com",
"ranger.jpa.jdbc.password": "_",
"ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"ranger.sso.providerurl": "",
"ranger.plugins.storm.serviceuser": "storm",
"ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab",
"ranger.admin.kerberos.token.valid.seconds": "30",
"ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver",
"ranger.unixauth.service.port": "5151"
},
"ams-hbase-site": {
"hbase.master.info.bindAddress": "0.0.0.0",
"hbase.normalizer.enabled": "false",
"phoenix.mutate.batchSize": "10000",
"hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper",
"hbase.regionserver.global.memstore.upperLimit": "0.5",
"phoenix.query.keepAliveMs": "300000",
"hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase",
"hbase.replication": "false",
"dfs.client.read.shortcircuit": "true",
"hbase.hregion.majorcompaction": "0",
"hbase.hregion.memstore.block.multiplier": "4",
"hbase.hregion.memstore.flush.size": "134217728",
"hbase.regionserver.global.memstore.lowerLimit": "0.4",
"hbase.zookeeper.property.clientPort": "{{zookeeper_clientPort}}",
"phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool",
"phoenix.query.rowKeyOrderSaltedTable": "true",
"hbase.client.scanner.timeout.period": "300000",
"phoenix.groupby.maxCacheSize": "307200000",
"hbase.normalizer.period": "600000",
"hbase.snapshot.enabled": "false",
"hbase.master.wait.on.regionservers.mintostart": "1",
"hbase.zookeeper.property.tickTime": "6000",
"phoenix.query.spoolThresholdBytes": "20971520",
"zookeeper.session.timeout": "120000",
"hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
"hfile.block.cache.size": "0.3",
"hbase.rpc.timeout": "300000",
"hbase.hregion.max.filesize": "4294967296",
"hbase.regionserver.port": "61320",
"hbase.regionserver.thread.compaction.small": "3",
"hbase.master.info.port": "61310",
"phoenix.coprocessor.maxMetaDataCacheSize": "20480000",
"phoenix.query.maxGlobalMemoryPercentage": "15",
"hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}",
"hbase.regionserver.info.port": "61330",
"zookeeper.znode.parent": "/ams-hbase-secure",
"hbase.hstore.blockingStoreFiles": "200",
"hbase.master.port": "61300",
"hbase.master.normalizer.class": "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer",
"hbase.zookeeper.leaderport": "61388",
"hbase.regionserver.thread.compaction.large": "2",
"phoenix.query.timeoutMs": "300000",
"hbase.local.dir": "${hbase.tmp.dir}/local",
"hbase.cluster.distributed": "false",
"zookeeper.session.timeout.localHBaseCluster": "120000",
"hbase.client.scanner.caching": "10000",
"phoenix.sequence.saltBuckets": "2",
"phoenix.coprocessor.maxServerCacheTimeToLiveMs": "60000",
"hbase.hstore.flusher.count": "2",
"hbase.zookeeper.peerport": "61288"
},
"ranger-ugsync-site": {
"ranger.usersync.ldap.binddn": "",
"ranger.usersync.policymgr.username": "rangerusersync",
"ranger.usersync.policymanager.mockrun": "false",
"ranger.usersync.group.searchbase": "",
"ranger.usersync.ldap.bindalias": "testldapalias",
"ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
"ranger.usersync.port": "5151",
"ranger.usersync.pagedresultssize": "500",
"ranger.usersync.group.memberattributename": "",
"ranger.usersync.kerberos.principal": "rangerusersync/_HOST@HADOOP.TEST",
"ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
"ranger.usersync.ldap.referral": "ignore",
"ranger.usersync.group.searchfilter": "",
"ranger.usersync.ldap.user.objectclass": "person",
"ranger.usersync.logdir": "{{usersync_log_dir}}",
"ranger.usersync.ldap.user.searchfilter": "",
"ranger.usersync.ldap.groupname.caseconversion": "none",
"ranger.usersync.ldap.ldapbindpassword": "",
"ranger.usersync.unix.minUserId": "500",
"ranger.usersync.policymanager.maxrecordsperapicall": "1000",
"ranger.usersync.group.nameattribute": "",
"ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
"ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
"ranger.usersync.user.searchenabled": "false",
"ranger.usersync.group.usermapsyncenabled": "true",
"ranger.usersync.ldap.bindkeystore": "",
"ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
"ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab",
"ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
"ranger.usersync.group.objectclass": "",
"ranger.usersync.ldap.user.searchscope": "sub",
"ranger.usersync.unix.password.file": "/etc/passwd",
"ranger.usersync.ldap.user.nameattribute": "",
"ranger.usersync.pagedresultsenabled": "true",
"ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
"ranger.usersync.group.search.first.enabled": "false",
"ranger.usersync.group.searchenabled": "false",
"ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
"ranger.usersync.ssl": "true",
"ranger.usersync.ldap.url": "",
"ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
"ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
"ranger.usersync.ldap.user.searchbase": "",
"ranger.usersync.ldap.username.caseconversion": "none",
"ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
"ranger.usersync.keystore.password": "UnIx529p",
"ranger.usersync.unix.group.file": "/etc/group",
"ranger.usersync.filesource.file": "/tmp/usergroup.txt",
"ranger.usersync.group.searchscope": "",
"ranger.usersync.truststore.password": "changeit",
"ranger.usersync.enabled": "true",
"ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
"ranger.usersync.filesource.text.delimiter": ","
},
"hivemetastore-site": {
"hive.service.metrics.hadoop2.component": "hivemetastore",
"hive.metastore.metrics.enabled": "true",
"hive.service.metrics.file.location": "/var/log/hive/hivemetastore-report.json",
"hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
},
"spark-log4j-properties": {
"content": "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
},
"hbase-site": {
"hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec",
"hbase.master.info.bindAddress": "0.0.0.0",
"hbase.regionserver.port": "16020",
"hbase.client.keyvalue.maxsize": "1048576",
"hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
"hbase.hstore.compactionThreshold": "3",
"hbase.hregion.majorcompaction.jitter": "0.50",
"hbase.security.authentication": "kerberos",
"hbase.rootdir": "hdfs://mn01.vagrant:8020/apps/hbase/data",
"hbase.rpc.timeout": "90000",
"hbase.regionserver.handler.count": "30",
"hbase.security.authentication.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"hbase.hregion.majorcompaction": "604800000",
"hbase.rpc.protection": "authentication",
"hbase.bucketcache.size": "",
"hbase.master.kerberos.principal": "hbase/_HOST@HADOOP.TEST",
"hbase.bucketcache.percentage.in.combinedcache": "",
"hbase.hregion.memstore.flush.size": "134217728",
"hbase.superuser": "hbase",
"hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
"hbase.zookeeper.property.clientPort": "2181",
"phoenix.functions.allowUserDefinedFunctions": " ",
"hbase.hstore.compaction.max": "10",
"hbase.master.ui.readonly": "true",
"hbase.hregion.max.filesize": "10737418240",
"hbase.bulkload.staging.dir": "/apps/hbase/staging",
"hbase.security.authentication.spnego.kerberos.principal": "HTTP/_HOST@HADOOP.TEST",
"hbase.bucketcache.ioengine": "",
"zookeeper.session.timeout": "90000",
"hbase.regionserver.global.memstore.size": "0.4",
"hbase.tmp.dir": "/tmp/hbase-${user.name}",
"hbase.region.server.rpc.scheduler.factory.class": "",
"hfile.block.cache.size": "0.40",
"hbase.regionserver.kerberos.principal": "hbase/_HOST@HADOOP.TEST",
"hbase.client.scanner.caching": "100",
"hbase.client.retries.number": "35",
"hbase.defaults.for.version.skip": "true",
"hbase.master.info.port": "16010",
"hbase.rpc.controllerfactory.class": "",
"hbase.zookeeper.quorum": "mn01.vagrant",
"hbase.regionserver.info.port": "16030",
"zookeeper.recovery.retry": "6",
"zookeeper.znode.parent": "/hbase-secure",
"hbase.zookeeper.useMulti": "true",
"hbase.hstore.blockingStoreFiles": "10",
"hbase.master.port": "16000",
"hbase.security.authorization": "true",
"hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
"phoenix.query.timeoutMs": "60000",
"hbase.local.dir": "${hbase.tmp.dir}/local",
"hbase.coprocessor.regionserver.classes": "{{hbase_coprocessor_regionserver_classes}}",
"hbase.cluster.distributed": "true",
"hbase.hregion.memstore.mslab.enabled": "true",
"dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
"hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
"hbase.hregion.memstore.block.multiplier": "4"
},
"ams-hbase-policy": {
"security.masterregion.protocol.acl": "*",
"security.admin.protocol.acl": "*",
"security.client.protocol.acl": "*"
},
"hadoop-policy": {
"security.job.client.protocol.acl": "*",
"security.job.task.protocol.acl": "*",
"security.datanode.protocol.acl": "*",
"security.namenode.protocol.acl": "*",
"security.client.datanode.protocol.acl": "*",
"security.inter.tracker.protocol.acl": "*",
"security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
"security.client.protocol.acl": "*",
"security.refresh.policy.protocol.acl": "hadoop",
"security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
},
"hive-site": {
"javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
"hive.fetch.task.aggr": "false",
"hive.execution.engine": "tez",
"atlas.cluster.name": "{{cluster_name}}",
"hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
"hive.server2.thrift.http.port": "10001",
"hive.tez.min.partition.factor": "0.25",
"hive.tez.cpu.vcores": "-1",
"hive.compute.query.using.stats": "true",
"hive.stats.dbclass": "fs",
"hive.merge.size.per.task": "256000000",
"hive.fetch.task.conversion": "more",
"hive.auto.convert.sortmerge.join.to.mapjoin": "false",
"hive.server2.thrift.http.path": "cliservice",
"hive.exec.scratchdir": "/tmp/hive",
"hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.zookeeper.namespace": "hive_zookeeper_namespace",
"hive.cbo.enable": "true",
"hive.optimize.reducededuplication": "true",
"hive.optimize.bucketmapjoin": "true",
"hive.mapjoin.bucket.cache.size": "10000",
"hive.limit.optimize.enable": "true",
"hive.server2.max.start.attempts": "5",
"hive.server2.enable.doAs": "false",
"hive.exec.max.dynamic.partitions": "5000",
"hive.metastore.sasl.enabled": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"hive.optimize.constant.propagation": "true",
"hive.exec.submitviachild": "false",
"hive.metastore.kerberos.principal": "hive/_HOST@HADOOP.TEST",
"hive.txn.max.open.batch": "1000",
"hive.exec.compress.output": "false",
"hive.tez.auto.reducer.parallelism": "true",
"hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
"hive.merge.mapfiles": "true",
"hive.exec.parallel.thread.number": "8",
"hive.mapjoin.optimized.hashtable": "true",
"hive.default.fileformat": "TextFile",
"hive.optimize.metadataonly": "true",
"hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
"hive.server2.thrift.max.worker.threads": "500",
"hive.optimize.sort.dynamic.partition": "false",
"hive.server2.table.type.mapping": "CLASSIC",
"hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener",
"hive.metastore.failure.retries": "24",
"hive.merge.smallfiles.avgsize": "16000000",
"hive.tez.max.partition.factor": "2.0",
"hive.server2.transport.mode": "binary",
"hive.tez.container.size": "512",
"hive.optimize.bucketmapjoin.sortedmerge": "false",
"hive.compactor.worker.threads": "0",
"hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
"hive.map.aggr.hash.percentmemory": "0.5",
"hive.user.install.directory": "/user/",
"datanucleus.autoCreateSchema": "false",
"hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
"hive.merge.rcfile.block.level": "true",
"hive.map.aggr": "true",
"hive.metastore.client.connect.retry.delay": "5s",
"hive.security.authorization.enabled": "true",
"atlas.hook.hive.minThreads": "1",
"hive.server2.tez.default.queues": "default",
"hive.prewarm.enabled": "false",
"hive.exec.reducers.max": "1009",
"hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
"hive.stats.fetch.partition.stats": "true",
"hive.cli.print.header": "false",
"hive.server2.thrift.sasl.qop": "auth",
"hive.server2.support.dynamic.service.discovery": "true",
"hive.server2.thrift.port": "10000",
"hive.exec.reducers.bytes.per.reducer": "67108864",
"hive.compactor.abortedtxn.threshold": "1000",
"hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
"hive.metastore.warehouse.dir": "/apps/hive/warehouse",
"hive.metastore.client.socket.timeout": "1800s",
"hive.server2.zookeeper.namespace": "hiveserver2",
"hive.prewarm.numcontainers": "3",
"hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
"hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"atlas.hook.hive.maxThreads": "1",
"hive.auto.convert.join": "true",
"hive.enforce.bucketing": "false",
"hive.server2.authentication.spnego.keytab": "/etc/security/keytabs/spnego.service.keytab",
"hive.mapred.reduce.tasks.speculative.execution": "false",
"hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
"hive.exec.dynamic.partition.mode": "strict",
"hive.auto.convert.sortmerge.join": "true",
"hive.zookeeper.quorum": "mn01.vagrant:2181",
"hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
"hive.exec.parallel": "false",
"hive.exec.compress.intermediate": "false",
"hive.enforce.sorting": "true",
"hive.txn.timeout": "300",
"hive.metastore.authorization.storage.checks": "false",
"hive.exec.orc.default.stripe.size": "67108864",
"hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
"hive.server2.logging.operation.enabled": "true",
"hive.merge.tezfiles": "false",
"hive.compactor.initiator.on": "false",
"hive.auto.convert.join.noconditionaltask": "true",
"hive.server2.authentication.kerberos.principal": "hive/_HOST@HADOOP.TEST",
"hive.compactor.worker.timeout": "86400L",
"hive.optimize.null.scan": "true",
"hive.server2.tez.initialize.default.sessions": "false",
"datanucleus.cache.level2.type": "none",
"hive.stats.autogather": "true",
"hive.server2.use.SSL": "false",
"hive.exec.submit.local.task.via.child": "true",
"hive.merge.mapredfiles": "false",
"hive.vectorized.execution.enabled": "true",
"hive.cluster.delegation.token.store.zookeeper.connectString": "mn01.vagrant:2181",
"hive.map.aggr.hash.min.reduction": "0.5",
"hive.tez.log.level": "INFO",
"hive.server2.tez.sessions.per.default.queue": "1",
"hive.exec.max.dynamic.partitions.pernode": "2000",
"hive.tez.dynamic.partition.pruning": "true",
"datanucleus.fixedDatastore": "true",
"hive.limit.pushdown.memory.usage": "0.04",
"hive.security.metastore.authorization.auth.reads": "true",
"ambari.hive.db.schema.name": "hive",
"hive.vectorized.groupby.checkinterval": "4096",
"hive.smbjoin.cache.rows": "10000",
"hive.metastore.execute.setugi": "true",
"hive.zookeeper.client.port": "2181",
"hive.vectorized.groupby.maxentries": "100000",
"hive.server2.authentication.spnego.principal": "HTTP/_HOST@HADOOP.TEST",
"hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
"javax.jdo.option.ConnectionPassword": "bigdata",
"hive.exec.max.created.files": "100000",
"hive.default.fileformat.managed": "TextFile",
"hive.vectorized.execution.reduce.enabled": "false",
"hive.fetch.task.conversion.threshold": "1073741824",
"hive.orc.splits.include.file.footer": "false",
"hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.merge.orcfile.stripe.level": "true",
"hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.server2.allow.user.substitution": "true",
"hive.optimize.index.filter": "true",
"hive.exec.orc.encoding.strategy": "SPEED",
"hive.metastore.connect.retries": "24",
"hive.metastore.server.max.threads": "100000",
"hive.exec.orc.compression.strategy": "SPEED",
"hive.vectorized.groupby.flush.percent": "0.1",
"hive.metastore.uris": "thrift://mn01.vagrant:9083",
"hive.enforce.sortmergebucketmapjoin": "true",
"hive.auto.convert.join.noconditionaltask.size": "143165576",
"javax.jdo.option.ConnectionUserName": "hive",
"hive.compactor.delta.num.threshold": "10",
"hive.exec.dynamic.partition": "true",
"hive.server2.authentication": "KERBEROS",
"hive.stats.fetch.column.stats": "false",
"javax.jdo.option.ConnectionURL": "jdbc:mysql://mn01.vagrant/hive?createDatabaseIfNotExist=true",
"hive.orc.compute.splits.num.threads": "10",
"hive.tez.smb.number.waves": "0.5",
"hive.convert.join.bucket.mapjoin.tez": "false",
"hive.optimize.reducededuplication.min.reducer": "4",
"hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
"hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"hive.exec.orc.default.compress": "ZLIB",
"hive.support.concurrency": "false",
"hive.compactor.check.interval": "300L",
"hive.compactor.delta.pct.threshold": "0.1f",
"hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
"hive.server2.authentication.ldap.url": " "
},
"hive-interactive-env": {
"content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n # The heap size of the jvm stared by hive shell script can be controlled via:\n\n if [ \"$SERVICE\" = \"metastore\" ]; then\n export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n else\n export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n fi\n\n export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m\"\n\n # Larger heap size may be required when running queries over large number of files or partitions.\n # By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n # appropriate for hive server (hwi etc).\n\n\n # Set HADOOP_HOME to point to a specific hadoop install directory\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n # Hive Configuration Directory can be controlled by:\n export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}\n\n # Add additional hcatalog jars\n if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n else\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar\n fi\n\n export METASTORE_PORT={{hive_metastore_port}}\n\n # Spark assembly contains a conflicting copy of HiveConf from hive-1.2\n export HIVE_SKIP_SPARK_ASSEMBLY=true",
"llap_headroom_space": "6144",
"llap_heap_size": "0",
"num_llap_nodes": "1",
"llap_queue_capacity": "0",
"llap_app_name": "llap0",
"enable_hive_interactive": "false",
"llap_java_opts": "-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}",
"num_retries_for_checking_llap_status": "10",
"slider_am_container_mb": "341",
"hive_server_interactive_host": "localhost",
"llap_log_level": "INFO"
},
"falcon-atlas-application.properties": {
"atlas.hook.falcon.synchronous": "false",
"atlas.hook.falcon.numRetries": "3",
"atlas.hook.falcon.queueSize": "1000",
"atlas.jaas.KafkaClient.option.principal": "falcon/_HOST@HADOOP.TEST",
"atlas.jaas.KafkaClient.option.keyTab": "/etc/security/keytabs/falcon.service.keytab",
"atlas.hook.falcon.maxThreads": "5",
"atlas.hook.falcon.minThreads": "5",
"atlas.hook.falcon.keepAliveTime": "10"
},
"hive-env": {
"hive.client.heapsize": "512",
"hive.heapsize": "512",
"hive_user_nproc_limit": "16000",
"hcat_user": "hcat",
"hive_user_nofile_limit": "32000",
"hive_database_type": "mysql",
"hive_ambari_database": "MySQL",
"webhcat_user": "hcat",
"hcat_pid_dir": "/var/run/webhcat",
"hive_security_authorization": "Ranger",
"content": "\n export HADOOP_USER_CLASSPATH_FIRST=true #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n # The heap size of the jvm stared by hive shell script can be controlled via:\n\n if [ \"$SERVICE\" = \"metastore\" ]; then\n export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n else\n export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n fi\n\n export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m\"\n\n # Larger heap size may be required when running queries over large number of files or partitions.\n # By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n # appropriate for hive server (hwi etc).\n\n\n # Set HADOOP_HOME to point to a specific hadoop install directory\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}\n\n # Hive Configuration Directory can be controlled by:\n export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}\n\n # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n fi\n elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n fi\n\n export METASTORE_PORT={{hive_metastore_port}}\n\n {% if sqla_db_used or lib_dir_available %}\n export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n {% endif %}",
"hive_timeline_logging_enabled": "true",
"hive_database_name": "hive",
"hive_exec_orc_storage_strategy": "SPEED",
"hive_pid_dir": "/var/run/hive",
"hive_log_dir": "/var/log/hive",
"hive_txn_acid": "off",
"hive_user": "hive",
"hive.metastore.heapsize": "1024",
"hcat_log_dir": "/var/log/webhcat",
"hive_database": "Existing MySQL Database"
},
"ranger-yarn-policymgr-ssl": {
"xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks",
"xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"yarn-site": {
"yarn.timeline-service.http-authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"yarn.resourcemanager.webapp.address": "mn01.vagrant:8088",
"yarn.node-labels.enabled": "false",
"yarn.resourcemanager.scheduler.monitor.enable": "false",
"yarn.resourcemanager.zk-num-retries": "1000",
"yarn.timeline-service.http-authentication.signature.secret.file": "",
"yarn.timeline-service.bind-host": "0.0.0.0",
"yarn.resourcemanager.ha.enabled": "false",
"yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
"yarn.timeline-service.http-authentication.signature.secret": "",
"yarn.timeline-service.webapp.address": "mn01.vagrant:8188",
"yarn.nodemanager.principal": "nm/_HOST@HADOOP.TEST",
"yarn.timeline-service.enabled": "true",
"yarn.nodemanager.recovery.enabled": "true",
"yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath": "/usr/hdp/${hdp.version}/spark/hdpLib/*",
"yarn.timeline-service.http-authentication.type": "kerberos",
"yarn.nodemanager.container-metrics.unregister-delay-ms": "60000",
"yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab",
"yarn.resourcemanager.webapp.https.address": "mn01.vagrant:8090",
"yarn.timeline-service.entity-group-fs-store.summary-store": "org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore",
"yarn.nodemanager.aux-services.spark2_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark2/aux/*",
"yarn.resourcemanager.hostname": "mn01.vagrant",
"yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@HADOOP.TEST",
"yarn.resourcemanager.am.max-attempts": "2",
"yarn.nodemanager.log-aggregation.debug-enabled": "false",
"yarn.scheduler.maximum-allocation-vcores": "4",
"yarn.resourcemanager.system-metrics-publisher.enabled": "true",
"yarn.nodemanager.vmem-pmem-ratio": "2.1",
"yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
"yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
"yarn.timeline-service.http-authentication.cookie.path": "",
"yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
"yarn.nodemanager.aux-services.spark2_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService",
"yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@HADOOP.TEST",
"yarn.log.server.url": "http://mn01.vagrant:19888/jobhistory/logs",
"yarn.timeline-service.keytab": "/etc/security/keytabs/yarn.service.keytab",
"yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
"yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
"yarn.timeline-service.entity-group-fs-store.active-dir": "/ats/active/",
"yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab",
"yarn.resourcemanager.principal": "rm/_HOST@HADOOP.TEST",
"yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
"yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
"yarn.nodemanager.remote-app-log-dir-suffix": "logs",
"yarn.resourcemanager.connect.max-wait.ms": "900000",
"yarn.resourcemanager.address": "mn01.vagrant:8050",
"yarn.timeline-service.http-authentication.token.validity": "",
"yarn.resourcemanager.proxy-user-privileges.enabled": "true",
"yarn.scheduler.maximum-allocation-mb": "4096",
"yarn.nodemanager.container-monitor.interval-ms": "3000",
"yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
"yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
"yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
"yarn.timeline-service.address": "mn01.vagrant:10200",
"yarn.log-aggregation-enable": "true",
"yarn.nodemanager.delete.debug-delay-sec": "0",
"yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore",
"yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
"yarn.timeline-service.client.retry-interval-ms": "1000",
"yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes": "org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin",
"yarn.nodemanager.aux-services.spark_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark/aux/*",
"yarn.nodemanager.aux-services": "mapreduce_shuffle,spark_shuffle,spark2_shuffle",
"yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
"yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
"yarn.resourcemanager.zk-timeout-ms": "10000",
"yarn.resourcemanager.fs.state-store.uri": " ",
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.remote-app-log-dir": "/app-logs",
"hadoop.registry.zk.quorum": "mn01.vagrant:2181",
"yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds": "3600",
"yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
"yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
"yarn.timeline-service.http-authentication.proxyuser.root.groups": "*",
"yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
"yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
"yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
"yarn.resourcemanager.work-preserving-recovery.enabled": "true",
"yarn.resourcemanager.resource-tracker.address": "mn01.vagrant:8025",
"yarn.nodemanager.health-checker.script.timeout-ms": "60000",
"yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
"yarn.nodemanager.resource.memory-mb": "4096",
"yarn.timeline-service.http-authentication.kerberos.name.rules": "",
"yarn.nodemanager.resource.cpu-vcores": "4",
"yarn.resourcemanager.proxyusers.*.users": "",
"yarn.timeline-service.ttl-ms": "2678400000",
"yarn.nodemanager.resource.percentage-physical-cpu-limit": "80",
"yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
"yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "3600",
"yarn.nodemanager.log.retain-second": "604800",
"yarn.timeline-service.principal": "yarn/_HOST@HADOOP.TEST",
"yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore",
"yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
"yarn.resourcemanager.proxyusers.*.groups": "",
"yarn.timeline-service.client.max-retries": "30",
"yarn.nodemanager.health-checker.interval-ms": "135000",
"yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
"yarn.nodemanager.vmem-check-enabled": "false",
"yarn.acl.enable": "true",
"yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
"yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
"yarn.client.nodemanager-connect.max-wait-ms": "60000",
"yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
"yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
"yarn.timeline-service.http-authentication.proxyusers.*.users": "",
"yarn.timeline-service.http-authentication.signer.secret.provider": "",
"yarn.resourcemanager.bind-host": "0.0.0.0",
"yarn.timeline-service.http-authentication.cookie.domain": "",
"yarn.http.policy": "HTTP_ONLY",
"yarn.timeline-service.version": "1.5",
"yarn.resourcemanager.zk-address": "mn01.vagrant:2181",
"yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
"yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
"yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
"yarn.timeline-service.entity-group-fs-store.retain-seconds": "604800",
"yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"yarn.scheduler.minimum-allocation-vcores": "1",
"yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
"yarn.scheduler.minimum-allocation-mb": "512",
"yarn.timeline-service.ttl-enable": "true",
"yarn.resourcemanager.scheduler.address": "mn01.vagrant:8030",
"yarn.log-aggregation.retain-seconds": "2592000",
"yarn.nodemanager.address": "0.0.0.0:45454",
"hadoop.registry.rm.enabled": "false",
"yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
"yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
"yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
"yarn.nodemanager.log-aggregation.compression-type": "gz",
"yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@HADOOP.TEST",
"yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
"yarn.resourcemanager.recovery.enabled": "true",
"yarn.timeline-service.recovery.enabled": "true",
"yarn.nodemanager.bind-host": "0.0.0.0",
"yarn.resourcemanager.zk-retry-interval-ms": "1000",
"yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
"yarn.admin.acl": "yarn,dr.who",
"yarn.timeline-service.http-authentication.proxyuser.root.hosts": "mn01.vagrant",
"yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
"yarn.timeline-service.entity-group-fs-store.scan-interval-seconds": "60",
"yarn.timeline-service.entity-group-fs-store.done-dir": "/ats/done/",
"yarn.nodemanager.aux-services.spark_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService",
"yarn.client.nodemanager-connect.retry-interval-ms": "10000",
"yarn.resourcemanager.admin.address": "mn01.vagrant:8141",
"yarn.timeline-service.webapp.https.address": "mn01.vagrant:8190",
"yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
"yarn.resourcemanager.connect.retry-interval.ms": "30000",
"yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000",
"yarn.resourcemanager.proxyusers.*.hosts": ""
},
"ranger-hbase-plugin-properties": {
"hadoop.rpc.protection": "",
"REPOSITORY_CONFIG_USERNAME": "hbase",
"policy_user": "ambari-qa",
"common.name.for.certificate": "",
"REPOSITORY_CONFIG_PASSWORD": "hbase",
"ranger-hbase-plugin-enabled": "Yes"
},
"falcon-client.properties": {
"falcon.url": "http://{{falcon_host}}:{{falcon_port}}"
},
"webhcat-site": {
"templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://mn01.vagrant:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@HADOOP.TEST",
"templeton.hadoop.conf.dir": "/etc/hadoop/conf",
"templeton.kerberos.secret": "secret",
"templeton.port": "50111",
"templeton.hive.home": "hive.tar.gz/hive",
"templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"templeton.libjars": "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar",
"templeton.exec.timeout": "60000",
"templeton.hcat.home": "hive.tar.gz/hive/hcatalog",
"templeton.sqoop.home": "sqoop.tar.gz/sqoop",
"templeton.python": "${env.PYTHON_CMD}",
"templeton.kerberos.principal": "HTTP/_HOST@HADOOP.TEST",
"templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
"templeton.hcat": "/usr/hdp/${hdp.version}/hive/bin/hcat",
"templeton.hadoop": "/usr/hdp/${hdp.version}/hadoop/bin/hadoop",
"templeton.override.enabled": "false",
"templeton.jar": "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar",
"templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
"templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib",
"webhcat.proxyuser.root.hosts": "mn01.vagrant",
"webhcat.proxyuser.root.groups": "*",
"templeton.hive.path": "hive.tar.gz/hive/bin/hive",
"templeton.pig.path": "pig.tar.gz/pig/bin/pig",
"templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
"templeton.zookeeper.hosts": "mn01.vagrant:2181",
"templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
"templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar",
"templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
"templeton.hadoop.queue.name": "default"
},
"kafka-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, 
requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false"
},
"llap-daemon-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# This is the log4j2 properties file used by llap-daemons. There's several loggers defined, which\n# can be selected while configuring LLAP.\n# Based on the one selected - UI links etc need to be manipulated in the system.\n# Note: Some names and logic is common to this file and llap LogHelpers. Make sure to change that\n# as well, if changing this file.\n\nstatus = INFO\nname = LlapDaemonLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.llap.daemon.log.level = INFO\nproperty.llap.daemon.root.logger = console\nproperty.llap.daemon.log.dir = .\nproperty.llap.daemon.log.file = llapdaemon.log\nproperty.llap.daemon.historylog.file = llapdaemon_history.log\nproperty.llap.daemon.log.maxfilesize = 256MB\nproperty.llap.daemon.log.maxbackupindex = 240\n\n# list of all appenders\nappenders = console, RFA, HISTORYAPPENDER, query-routing\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n\n\n# rolling file appender\nappender.RFA.type = RollingRandomAccessFile\nappender.RFA.name = RFA\nappender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}\nappender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%d{yyyy-MM-dd-HH}_%i.done\nappender.RFA.layout.type = PatternLayout\nappender.RFA.layout.pattern = %d{ISO8601} %-5p [%t (%X{fragmentId})] %c: %m%n\nappender.RFA.policies.type = Policies\nappender.RFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.RFA.policies.time.interval = 1\nappender.RFA.policies.time.modulate = true\nappender.RFA.policies.size.type = SizeBasedTriggeringPolicy\nappender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}\nappender.RFA.strategy.type = DefaultRolloverStrategy\nappender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}\n\n# history file appender\nappender.HISTORYAPPENDER.type = RollingRandomAccessFile\nappender.HISTORYAPPENDER.name = HISTORYAPPENDER\nappender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}\nappender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd}_%i.done\nappender.HISTORYAPPENDER.layout.type = PatternLayout\nappender.HISTORYAPPENDER.layout.pattern = %m%n\nappender.HISTORYAPPENDER.policies.type = Policies\nappender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy\nappender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}\nappender.HISTORYAPPENDER.policies.time.type = 
TimeBasedTriggeringPolicy\nappender.HISTORYAPPENDER.policies.time.interval = 1\nappender.HISTORYAPPENDER.policies.time.modulate = true\nappender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy\nappender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}\n\n# queryId based routing file appender\nappender.query-routing.type = Routing\nappender.query-routing.name = query-routing\nappender.query-routing.routes.type = Routes\nappender.query-routing.routes.pattern = $${ctx:queryId}\n#Purge polciy for query-based Routing Appender\nappender.query-routing.purgePolicy.type = LlapRoutingAppenderPurgePolicy\n# Note: Do not change this name without changing the corresponding entry in LlapConstants\nappender.query-routing.purgePolicy.name = llapLogPurgerQueryRouting\n# default route\nappender.query-routing.routes.route-default.type = Route\nappender.query-routing.routes.route-default.key = $${ctx:queryId}\nappender.query-routing.routes.route-default.ref = RFA\n# queryId based route\nappender.query-routing.routes.route-mdc.type = Route\nappender.query-routing.routes.route-mdc.file-mdc.type = LlapWrappedAppender\nappender.query-routing.routes.route-mdc.file-mdc.name = IrrelevantName-query-routing\nappender.query-routing.routes.route-mdc.file-mdc.app.type = RandomAccessFile\nappender.query-routing.routes.route-mdc.file-mdc.app.name = file-mdc\nappender.query-routing.routes.route-mdc.file-mdc.app.fileName = ${sys:llap.daemon.log.dir}/${ctx:queryId}-${ctx:dagId}.log\nappender.query-routing.routes.route-mdc.file-mdc.app.layout.type = PatternLayout\nappender.query-routing.routes.route-mdc.file-mdc.app.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking\n\nlogger.LlapIoImpl.name = LlapIoImpl\nlogger.LlapIoImpl.level = INFO\n\nlogger.LlapIoOrc.name = LlapIoOrc\nlogger.LlapIoOrc.level = WARN\n\nlogger.LlapIoCache.name = LlapIoCache\nlogger.LlapIOCache.level = WARN\n\nlogger.LlapIoLocking.name = LlapIoLocking\nlogger.LlapIoLocking.level = WARN\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\nlogger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger\nlogger.HistoryLogger.level = INFO\nlogger.HistoryLogger.additivity = false\nlogger.HistoryLogger.appenderRefs = HistoryAppender\nlogger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER\n\n# root logger\nrootLogger.level = ${sys:llap.daemon.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}"
},
"hive-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\nhive.log.threshold=ALL\nhive.root.logger=INFO,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.log.file=hive.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files\n# for different CLI session.\n#\n# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,DRFA\nlog4j.category.Datastore=ERROR,DRFA\nlog4j.category.Datastore.Schema=ERROR,DRFA\nlog4j.category.JPOX.Datastore=ERROR,DRFA\nlog4j.category.JPOX.Plugin=ERROR,DRFA\nlog4j.category.JPOX.MetaData=ERROR,DRFA\nlog4j.category.JPOX.Query=ERROR,DRFA\nlog4j.category.JPOX.General=ERROR,DRFA\nlog4j.category.JPOX.Enhancer=ERROR,DRFA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA"
},
"ranger-hdfs-security": {
"ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
"ranger.plugin.hdfs.service.name": "{{repo_name}}",
"ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
"ranger.plugin.hdfs.policy.pollIntervalMs": "30000",
"ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}",
"ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
"xasecure.add-hadoop-authorization": "true"
},
"sqoop-atlas-application.properties": {
"atlas.jaas.KafkaClient.option.renewTicket": "true",
"atlas.jaas.KafkaClient.option.useTicketCache": "true"
},
"mapred-env": {
"jobhistory_heapsize": "900",
"mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",
"mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
"content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\nexport HADOOP_OPTS=\"-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS\"\nexport JAVA_LIBRARY_PATH=\"${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}\"",
"mapred_user_nofile_limit": "32768",
"mapred_user_nproc_limit": "65536",
"mapred_user": "mapred"
},
"ranger-hive-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
"xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
"xasecure.audit.destination.solr.urls": "",
"xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hive/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hive/audit/hdfs/spool",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"xasecure.audit.destination.hdfs": "true",
"xasecure.audit.is.enabled": "true",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"xasecure.audit.destination.solr": "false",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/hive.service.keytab",
"xasecure.audit.provider.summary.enabled": "false",
"xasecure.audit.jaas.Client.option.principal": "hive/_HOST@HADOOP.TEST",
"xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"xasecure.audit.jaas.Client.option.serviceName": "solr"
},
"sqoop-env": {
"content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
"sqoop_user": "sqoop",
"jdbc_drivers": " "
},
"livy-conf": {
"livy.server.csrf_protection.enabled": "true",
"livy.impersonation.enabled": "true",
"livy.server.port": "8998",
"livy.server.session.timeout": "3600000",
"livy.superusers": "zeppelin-examplecluster",
"livy.server.auth.type": "kerberos",
"livy.environment": "production"
},
"ranger-hbase-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
"xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
"xasecure.audit.destination.solr.urls": "",
"xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hbase/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hbase/audit/hdfs/spool",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"xasecure.audit.destination.hdfs": "true",
"xasecure.audit.is.enabled": "true",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"xasecure.audit.destination.solr": "false",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/hbase.service.keytab",
"xasecure.audit.provider.summary.enabled": "true",
"xasecure.audit.jaas.Client.option.principal": "hbase/_HOST@HADOOP.TEST",
"xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"xasecure.audit.jaas.Client.option.serviceName": "solr"
},
"livy-env": {
"livy_group": "livy",
"spark_home": "/usr/hdp/current/spark-client",
"content": "\n #!/usr/bin/env bash\n\n # - SPARK_HOME Spark which you would like to use in livy\n # - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.\n # - LIVY_LOG_DIR Where log files are stored. (Default: ${LIVY_HOME}/logs)\n # - LIVY_PID_DIR Where the pid file is stored. (Default: /tmp)\n # - LIVY_SERVER_JAVA_OPTS Java Opts for running livy server (You can set jvm related setting here, like jvm memory/gc algorithm and etc.)\n export SPARK_HOME=/usr/hdp/current/spark-client\n export JAVA_HOME={{java_home}}\n export HADOOP_CONF_DIR=/etc/hadoop/conf\n export LIVY_LOG_DIR={{livy_log_dir}}\n export LIVY_PID_DIR={{livy_pid_dir}}\n export LIVY_SERVER_JAVA_OPTS=\"-Xmx2g\"",
"livy_pid_dir": "/var/run/livy",
"livy_log_dir": "/var/log/livy",
"livy_user": "livy"
},
"ranger-env": {
"xasecure.audit.destination.db": "true",
"ranger_pid_dir": "/var/run/ranger",
"ranger_solr_shards": "1",
"ranger_solr_config_set": "ranger_audits",
"ranger_solr_replication_factor": "1",
"ranger_user": "ranger",
"xml_configurations_supported": "true",
"ranger_admin_log_dir": "/var/log/ranger/admin",
"ranger-hbase-plugin-enabled": "Yes",
"ranger-yarn-plugin-enabled": "No",
"bind_anonymous": "false",
"ranger_admin_username": "amb_ranger_admin",
"admin_password": "admin",
"is_solrCloud_enabled": "false",
"ranger-storm-plugin-enabled": "No",
"ranger-hdfs-plugin-enabled": "Yes",
"ranger_group": "ranger",
"ranger-knox-plugin-enabled": "No",
"ranger-atlas-plugin-enabled": "No",
"ranger-kafka-plugin-enabled": "Yes",
"ranger_privelege_user_jdbc_url": "jdbc:mysql://localhost",
"ranger-hive-plugin-enabled": "Yes",
"xasecure.audit.destination.solr": "false",
"is_external_solrCloud_kerberos": "false",
"is_external_solrCloud_enabled": "false",
"xasecure.audit.destination.hdfs": "true",
"admin_username": "admin",
"xasecure.audit.destination.hdfs.dir": "hdfs://mn01.vagrant:8020",
"create_db_dbuser": "true",
"ranger_solr_collection_name": "ranger_audits",
"ranger_admin_password": "amb_ranger_admin",
"ranger_usersync_log_dir": "/var/log/ranger/usersync"
},
"ams-site": {
"timeline.metrics.host.aggregator.minute.ttl": "604800",
"timeline.metrics.host.aggregator.minute.disabled": "false",
"timeline.metrics.cluster.aggregator.second.disabled": "false",
"timeline.metrics.service.webapp.address": "mn01.vagrant:6188",
"timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.daily.ttl": "63072000",
"timeline.metrics.cluster.aggregator.second.timeslice.interval": "30",
"timeline.metrics.service.http.policy": "HTTP_ONLY",
"timeline.metrics.service.checkpointDelay": "60",
"timeline.metrics.host.aggregator.hourly.disabled": "false",
"timeline.metrics.cluster.aggregator.daily.interval": "86400",
"timeline.metrics.cluster.aggregator.hourly.ttl": "31536000",
"timeline.metrics.host.aggregate.splitpoints": " ",
"timeline.metrics.service.watcher.delay": "30",
"timeline.metrics.cluster.aggregator.interpolation.enabled": "true",
"timeline.metrics.host.aggregator.daily.disabled": "false",
"timeline.metrics.service.watcher.timeout": "30",
"timeline.metrics.sink.collection.period": "10",
"timeline.metrics.hbase.compression.scheme": "SNAPPY",
"timeline.metrics.hbase.fifo.compaction.enabled": "true",
"timeline.metrics.cluster.aggregator.hourly.interval": "3600",
"timeline.metrics.aggregators.skip.blockcache.enabled": "false",
"phoenix.spool.directory": "/tmp",
"timeline.metrics.host.aggregator.ttl": "86400",
"timeline.metrics.sink.report.interval": "60",
"timeline.metrics.service.use.groupBy.aggregators": "true",
"timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.hourly.disabled": "false",
"timeline.metrics.host.aggregator.minute.interval": "300",
"timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint",
"timeline.metrics.hbase.data.block.encoding": "FAST_DIFF",
"timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase",
"timeline.metrics.cluster.aggregator.second.ttl": "259200",
"timeline.metrics.cluster.aggregator.minute.ttl": "2592000",
"timeline.metrics.daily.aggregator.minute.interval": "86400",
"timeline.metrics.cache.enabled": "true",
"timeline.metrics.cluster.aggregate.splitpoints": " ",
"timeline.metrics.cluster.aggregator.minute.interval": "300",
"timeline.metrics.cache.size": "150",
"phoenix.query.maxGlobalMemoryPercentage": "25",
"timeline.metrics.service.operation.mode": "embedded",
"timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2",
"timeline.metrics.hbase.init.check.enabled": "true",
"timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.daily.disabled": "false",
"timeline.metrics.cluster.aggregator.minute.disabled": "false",
"timeline.metrics.service.rpc.address": "0.0.0.0:60200",
"timeline.metrics.host.aggregator.hourly.ttl": "2592000",
"timeline.metrics.service.resultset.fetchSize": "2000",
"timeline.metrics.service.watcher.initial.delay": "600",
"timeline.metrics.cache.commit.interval": "3",
"timeline.metrics.service.default.result.limit": "15840",
"timeline.metrics.host.aggregator.daily.ttl": "31536000",
"timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "2",
"timeline.metrics.service.watcher.disabled": "false",
"timeline.metrics.cluster.aggregator.second.interval": "120",
"timeline.metrics.host.aggregator.hourly.interval": "3600"
},
"ams-ssl-client": {
"ssl.client.truststore.password": "bigdata",
"ssl.client.truststore.type": "jks",
"ssl.client.truststore.location": "/etc/security/clientKeys/all.jks"
},
"spark-thrift-fairscheduler": {
"fairscheduler_content": "<?xml version=\"1.0\"?>\n <allocations>\n <pool name=\"default\">\n <schedulingMode>FAIR</schedulingMode>\n <weight>1</weight>\n <minShare>2</minShare>\n </pool>\n </allocations>"
},
"hbase-policy": {
"security.masterregion.protocol.acl": "*",
"security.admin.protocol.acl": "*",
"security.client.protocol.acl": "*"
},
"hive-atlas-application.properties": {
"atlas.hook.hive.synchronous": "false",
"atlas.hook.hive.queueSize": "1000",
"atlas.jaas.KafkaClient.option.principal": "hive/_HOST@HADOOP.TEST",
"atlas.jaas.KafkaClient.option.keyTab": "/etc/security/keytabs/hive.service.keytab",
"atlas.hook.hive.minThreads": "5",
"atlas.hook.hive.numRetries": "3",
"atlas.hook.hive.maxThreads": "5",
"atlas.hook.hive.keepAliveTime": "10"
},
"admin-properties": {
"audit_db_password": "bigdata",
"db_user": "ranger",
"DB_FLAVOR": "MYSQL",
"audit_db_user": "rangeraudit",
"db_password": "bigdata",
"db_root_user": "root",
"policymgr_external_url": "http:// mn01.vagrant :6080",
"db_name": "ranger",
"db_host": " mn01.vagrant :3306",
"db_root_password": "bigdata",
"audit_db_name": "rangeraudit",
"SQL_CONNECTOR_JAR": "/usr/share/java/mysql-connector-java.jar"
},
"hcat-env": {
"content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
},
"falcon-env": {
"falcon_port": "15000",
"falcon_pid_dir": "/var/run/falcon",
"supports_hive_dr": "true",
"falcon.emeddedmq.port": "61616",
"falcon_user": "falcon",
"falcon_local_dir": "/hadoop/falcon",
"content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falcon home dir. Default is the base location of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=\n\n{% if falcon_atlas_support %}\n# Add the Atlas Falcon hook to the Falcon classpath\nexport FALCON_EXTRA_CLASS_PATH={{atlas_hook_cp}}${FALCON_EXTRA_CLASS_PATH}\n{% endif %}",
"falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data",
"falcon.embeddedmq": "true",
"falcon_apps_hdfs_dir": "/apps/falcon",
"falcon_log_dir": "/var/log/falcon"
},
"zoo.cfg": {
"clientPort": "2181",
"autopurge.purgeInterval": "24",
"syncLimit": "5",
"dataDir": "/hadoop/zookeeper",
"initLimit": "10",
"tickTime": "2000",
"autopurge.snapRetainCount": "30"
},
"kafka-broker": {
"auto.leader.rebalance.enable": "true",
"kafka.ganglia.metrics.port": "8671",
"socket.send.buffer.bytes": "102400",
"num.network.threads": "3",
"log.segment.bytes": "1073741824",
"kafka.ganglia.metrics.host": "localhost",
"message.max.bytes": "1000000",
"replica.lag.time.max.ms": "10000",
"replica.fetch.min.bytes": "1",
"authorizer.class.name": "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer",
"num.io.threads": "8",
"offsets.retention.minutes": "86400000",
"fetch.purgatory.purge.interval.requests": "10000",
"offsets.topic.compression.codec": "0",
"default.replication.factor": "1",
"port": "6667",
"num.recovery.threads.per.data.dir": "1",
"kafka.metrics.reporters": "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter",
"log.retention.bytes": "-1",
"producer.purgatory.purge.interval.requests": "10000",
"replica.lag.max.messages": "4000",
"replica.high.watermark.checkpoint.interval.ms": "5000",
"zookeeper.connect": "mn01.vagrant:2181",
"controlled.shutdown.retry.backoff.ms": "5000",
"kafka.timeline.metrics.host": "{{metric_collector_host}}",
"kafka.ganglia.metrics.group": "kafka",
"kafka.timeline.metrics.reporter.sendInterval": "5900",
"num.partitions": "1",
"offsets.topic.segment.bytes": "104857600",
"super.users": "user:kafka",
"zookeeper.sync.time.ms": "2000",
"offset.metadata.max.bytes": "4096",
"principal.to.local.class": "kafka.security.auth.KerberosPrincipalToLocal",
"kafka.timeline.metrics.reporter.enabled": "true",
"controlled.shutdown.max.retries": "3",
"leader.imbalance.per.broker.percentage": "10",
"min.insync.replicas": "1",
"offsets.commit.required.acks": "-1",
"replica.fetch.wait.max.ms": "500",
"controlled.shutdown.enable": "true",
"log.roll.hours": "168",
"log.cleanup.interval.mins": "10",
"replica.socket.receive.buffer.bytes": "65536",
"kafka.ganglia.metrics.reporter.enabled": "true",
"kafka.timeline.metrics.truststore.path": "{{metric_truststore_path}}",
"zookeeper.connection.timeout.ms": "25000",
"delete.topic.enable": "false",
"offsets.load.buffer.size": "5242880",
"num.replica.fetchers": "1",
"socket.request.max.bytes": "104857600",
"kafka.timeline.metrics.maxRowCacheSize": "10000",
"controller.message.queue.size": "10",
"kafka.timeline.metrics.truststore.type": "{{metric_truststore_type}}",
"compression.type": "producer",
"queued.max.requests": "500",
"replica.fetch.max.bytes": "1048576",
"offsets.topic.num.partitions": "50",
"socket.receive.buffer.bytes": "102400",
"kafka.timeline.metrics.port": "{{metric_collector_port}}",
"offsets.commit.timeout.ms": "5000",
"offsets.topic.replication.factor": "3",
"external.kafka.metrics.include.prefix": "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request",
"offsets.retention.check.interval.ms": "600000",
"log.index.size.max.bytes": "10485760",
"security.inter.broker.protocol": "PLAINTEXTSASL",
"log.dirs": "/kafka-logs",
"listeners": "PLAINTEXT://localhost:6667",
"zookeeper.set.acl": "true",
"controller.socket.timeout.ms": "30000",
"replica.socket.timeout.ms": "30000",
"zookeeper.session.timeout.ms": "30000",
"auto.create.topics.enable": "true",
"kafka.timeline.metrics.truststore.password": "{{metric_truststore_password}}",
"external.kafka.metrics.exclude.prefix": "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec",
"leader.imbalance.check.interval.seconds": "300",
"log.index.interval.bytes": "4096",
"log.retention.hours": "168",
"kafka.timeline.metrics.protocol": "{{metric_collector_protocol}}"
},
"ams-grafana-ini": {
"content": "\n##################### Grafana Configuration Example #####################\n#\n# Everything has defaults so you only need to uncomment things you want to\n# change\n\n# possible values : production, development\n; app_mode = production\n\n#################################### Paths ####################################\n[paths]\n# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)\n#\n;data = /var/lib/grafana\ndata = {{ams_grafana_data_dir}}\n#\n# Directory where grafana can store logs\n#\n;logs = /var/log/grafana\nlogs = {{ams_grafana_log_dir}}\n\n\n#################################### Server ####################################\n[server]\n# Protocol (http or https)\n;protocol = http\nprotocol = {{ams_grafana_protocol}}\n# The ip address to bind to, empty will bind to all interfaces\n;http_addr =\n\n# The http port to use\n;http_port = 3000\nhttp_port = {{ams_grafana_port}}\n\n# The public facing domain name used to access grafana from a browser\n;domain = localhost\n\n# Redirect to correct domain if host header does not match domain\n# Prevents DNS rebinding attacks\n;enforce_domain = false\n\n# The full public facing url\n;root_url = %(protocol)s://%(domain)s:%(http_port)s/\n\n# Log web requests\n;router_logging = false\n\n# the path relative working path\n;static_root_path = public\nstatic_root_path = /usr/lib/ambari-metrics-grafana/public\n\n# enable gzip\n;enable_gzip = false\n\n# https certs & key file\n;cert_file =\n;cert_key =\ncert_file = {{ams_grafana_cert_file}}\ncert_key = {{ams_grafana_cert_key}}\n\n#################################### Database ####################################\n[database]\n# Either \"mysql\", \"postgres\" or \"sqlite3\", it's your choice\n;type = sqlite3\n;host = 127.0.0.1:3306\n;name = grafana\n;user = root\n;password =\n\n# For \"postgres\" only, either \"disable\", \"require\" or \"verify-full\"\n;ssl_mode = disable\n\n# For \"sqlite3\" only, path relative to data_path setting\n;path = grafana.db\n\n#################################### Session ####################################\n[session]\n# Either \"memory\", \"file\", \"redis\", \"mysql\", \"postgres\", default is \"file\"\n;provider = file\n\n# Provider config options\n# memory: not have any config yet\n# file: session dir path, is relative to grafana data_path\n# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`\n# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`\n# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable\n;provider_config = sessions\n\n# Session cookie name\n;cookie_name = grafana_sess\n\n# If you use session in https only, default is false\n;cookie_secure = false\n\n# Session life time, default is 86400\n;session_life_time = 86400\n\n#################################### Analytics ####################################\n[analytics]\n# Server reporting, sends usage counters to stats.grafana.org every 24 hours.\n# No ip addresses are being tracked, only simple counters to track\n# running instances, dashboard and error counts. 
It is very helpful to us.\n# Change this option to false to disable reporting.\n;reporting_enabled = true\n\n# Google Analytics universal tracking code, only enabled if you specify an id here\n;google_analytics_ua_id =\n\n#################################### Security ####################################\n[security]\n# default admin user, created on startup\nadmin_user = {{ams_grafana_admin_user}}\n\n# default admin password, can be changed before first start of grafana, or in profile settings\nadmin_password = {{ams_grafana_admin_pwd}}\n\n# used for signing\n;secret_key = SW2YcwTIb9zpOOhoPsMm\n\n# Auto-login remember days\n;login_remember_days = 7\n;cookie_username = grafana_user\n;cookie_remember_name = grafana_remember\n\n# disable gravatar profile images\n;disable_gravatar = false\n\n# data source proxy whitelist (ip_or_domain:port seperated by spaces)\n;data_source_proxy_whitelist =\n\n#################################### Users ####################################\n[users]\n# disable user signup / registration\n;allow_sign_up = true\n\n# Allow non admin users to create organizations\n;allow_org_create = true\n\n# Set to true to automatically assign new users to the default organization (id 1)\n;auto_assign_org = true\n\n# Default role new users will be automatically assigned (if disabled above is set to true)\n;auto_assign_org_role = Viewer\n\n# Background text for the user field on the login page\n;login_hint = email or username\n\n#################################### Anonymous Auth ##########################\n[auth.anonymous]\n# enable anonymous access\nenabled = true\n\n# specify organization name that should be used for unauthenticated users\norg_name = Main Org.\n\n# specify role for unauthenticated users\n;org_role = Admin\n\n#################################### Github Auth ##########################\n[auth.github]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_id\n;client_secret = some_secret\n;scopes = user:email,read:org\n;auth_url = https://github.com/login/oauth/authorize\n;token_url = https://github.com/login/oauth/access_token\n;api_url = https://api.github.com/user\n;team_ids =\n;allowed_organizations =\n\n#################################### Google Auth ##########################\n[auth.google]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_client_id\n;client_secret = some_client_secret\n;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email\n;auth_url = https://accounts.google.com/o/oauth2/auth\n;token_url = https://accounts.google.com/o/oauth2/token\n;api_url = https://www.googleapis.com/oauth2/v1/userinfo\n;allowed_domains =\n\n#################################### Auth Proxy ##########################\n[auth.proxy]\n;enabled = false\n;header_name = X-WEBAUTH-USER\n;header_property = username\n;auto_sign_up = true\n\n#################################### Basic Auth ##########################\n[auth.basic]\n;enabled = true\n\n#################################### Auth LDAP ##########################\n[auth.ldap]\n;enabled = false\n;config_file = /etc/grafana/ldap.toml\n\n#################################### SMTP / Emailing ##########################\n[smtp]\n;enabled = false\n;host = localhost:25\n;user =\n;password =\n;cert_file =\n;key_file =\n;skip_verify = false\n;from_address = admin@grafana.localhost\n\n[emails]\n;welcome_email_on_sign_up = false\n\n#################################### Logging ##########################\n[log]\n# Either \"console\", \"file\", default is \"console\"\n# Use 
comma to separate multiple modes, e.g. \"console, file\"\n;mode = console, file\n\n# Buffer length of channel, keep it as it is if you don't know what it is.\n;buffer_len = 10000\n\n# Either \"Trace\", \"Debug\", \"Info\", \"Warn\", \"Error\", \"Critical\", default is \"Trace\"\n;level = Info\n\n# For \"console\" mode only\n[log.console]\n;level =\n\n# For \"file\" mode only\n[log.file]\n;level =\n# This enables automated log rotate(switch of following options), default is true\n;log_rotate = true\n\n# Max line number of single file, default is 1000000\n;max_lines = 1000000\n\n# Max size shift of single file, default is 28 means 1 << 28, 256MB\n;max_lines_shift = 28\n\n# Segment log daily, default is true\n;daily_rotate = true\n\n# Expired days of log file(delete after max days), default is 7\n;max_days = 7\n\n#################################### AMPQ Event Publisher ##########################\n[event_publisher]\n;enabled = false\n;rabbitmq_url = amqp://localhost/\n;exchange = grafana_events\n\n;#################################### Dashboard JSON files ##########################\n[dashboards.json]\n;enabled = false\n;path = /var/lib/grafana/dashboards\npath = /usr/lib/ambari-metrics-grafana/public/dashboards",
"cert_key": "/etc/ambari-metrics-grafana/conf/ams-grafana.key",
"protocol": "http",
"port": "3000",
"cert_file": "/etc/ambari-metrics-grafana/conf/ams-grafana.crt"
},
"tez-env": {
"content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}",
"tez_user": "tez"
},
"ranger-kafka-policymgr-ssl": {
"xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks",
"xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file/{{credential_file}}",
"xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file/{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"spark-metrics-properties": {
"content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. 
MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
},
"ams-hbase-env": {
"hbase_pid_dir": "/var/run/ambari-metrics-collector/",
"hbase_classpath_additional": "",
"regionserver_xmn_size": "128",
"max_open_files_limit": "32768",
"hbase_master_maxperm_size": "128",
"hbase_regionserver_xmn_ratio": "0.2",
"hbase_master_heapsize": "512",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6+ required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nadditional_cp={{hbase_classpath_additional}}\nif [ -n \"$additional_cp\" ];\nthen\n export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp\nelse\n export HBASE_CLASSPATH=${HBASE_CLASSPATH}\nfi\n\n# The maximum amount of heap to use for hbase shell.\nexport HBASE_SHELL_OPTS=\"-Xmx256m\"\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n\n{% if java_version < 8 %}\nexport HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% else %}\nexport HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}\n\n\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\nexport HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n{% endif %}\n\n# use embedded native libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}\"\n\n# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME={{ams_hbase_home_dir}}\n\n# Explicitly Setting HBASE_HOME for AMS HBase so that there is no conflict\nexport HBASE_HOME={{ams_hbase_home_dir}}",
"hbase_regionserver_shutdown_timeout": "30",
"hbase_regionserver_heapsize": "768",
"hbase_log_dir": "/var/log/ambari-metrics-collector",
"hbase_master_xmn_size": "102"
},
"ranger-kafka-audit": {
"xasecure.audit.destination.solr.zookeepers": "NONE",
"xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true",
"xasecure.audit.destination.solr.urls": "",
"xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/kafka/audit/solr/spool",
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/kafka/audit/hdfs/spool",
"xasecure.audit.jaas.Client.option.storeKey": "false",
"xasecure.audit.destination.hdfs": "true",
"xasecure.audit.is.enabled": "true",
"xasecure.audit.jaas.Client.option.useKeyTab": "true",
"xasecure.audit.destination.solr": "false",
"xasecure.audit.jaas.Client.option.keyTab": "/etc/security/keytabs/kafka.service.keytab",
"xasecure.audit.provider.summary.enabled": "true",
"xasecure.audit.jaas.Client.option.principal": "kafka/_HOST@HADOOP.TEST",
"xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
"xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
"xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
"xasecure.audit.jaas.Client.option.serviceName": "solr"
},
"yarn-env": {
"yarn_heapsize": "1024",
"yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
"yarn_user_nproc_limit": "65536",
"apptimelineserver_heapsize": "1024",
"yarn_user_nofile_limit": "32768",
"is_supported_yarn_ranger": "true",
"nodemanager_heapsize": "1024",
"content": "\n export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\n export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\n export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n export JAVA_HOME={{java64_home}}\n export JAVA_LIBRARY_PATH=\"${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}\"\n\n # We need to add the EWMA appender for the yarn daemons only;\n # however, YARN_ROOT_LOGGER is shared by the yarn client and the\n # daemons. This is restrict the EWMA appender to daemons only.\n INVOKER=\"${0##*/}\"\n if [ \"$INVOKER\" == \"yarn-daemon.sh\" ]; then\n export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}\n fi\n\n # User for YARN daemons\n export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n # resolve links - $0 may be a softlink\n export YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n # some Java parameters\n # export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n if [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\n fi\n\n if [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\n fi\n\n JAVA=$JAVA_HOME/bin/java\n JAVA_HEAP_MAX=-Xmx1000m\n\n # For setting YARN specific HEAP sizes please use this\n # Parameter and set appropriately\n YARN_HEAPSIZE={{yarn_heapsize}}\n\n # check envvars which might override default args\n if [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\n fi\n\n # Resource Manager specific parameters\n\n # Specify the max Heapsize for the ResourceManager using a numerical value\n # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1000.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_RESOURCEMANAGER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n # Specify the JVM options to be used when starting the ResourceManager.\n # These options will be appended to the options specified as YARN_OPTS\n # and therefore may override any similar flags set in YARN_OPTS\n #export YARN_RESOURCEMANAGER_OPTS=\n\n # Node Manager specific parameters\n\n # Specify the max Heapsize for the NodeManager using a numerical value\n # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1000.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_NODEMANAGER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n # Specify the max Heapsize for the timeline server using a numerical value\n # in the scale of MB. 
For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1024.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_TIMELINESERVER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n # Specify the JVM options to be used when starting the NodeManager.\n # These options will be appended to the options specified as YARN_OPTS\n # and therefore may override any similar flags set in YARN_OPTS\n #export YARN_NODEMANAGER_OPTS=\n\n # so that filenames w/ spaces are handled correctly in loops below\n IFS=\n\n\n # default log directory and file\n if [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\n fi\n if [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\n fi\n\n # default policy file for service-level authorization\n if [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\n fi\n\n # restore ordinary behaviour\n unset IFS\n\n\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n export YARN_NODEMANAGER_OPTS=\"$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT\"\n export YARN_RESOURCEMANAGER_OPTS=\"$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT\"\n if [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\n fi\n YARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"\n YARN_OPTS=\"$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}\"",
"service_check.queue.name": "default",
"min_user_id": "1000",
"yarn_cgroups_enabled": "false",
"yarn_user": "yarn",
"resourcemanager_heapsize": "1024",
"yarn_log_dir_prefix": "/var/log/hadoop-yarn"
},
"beeline-log4j2": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = BeelineLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = WARN\nproperty.hive.root.logger = console\n\n# list of all appenders\nappenders = console\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# list of all loggers\nloggers = HiveConnection\n\n# HiveConnection logs useful info for dynamic service discovery\nlogger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection\nlogger.HiveConnection.level = INFO\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}"
},
"ranger-yarn-security": {
"ranger.plugin.yarn.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml",
"ranger.plugin.yarn.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
"ranger.plugin.yarn.service.name": "{{repo_name}}",
"ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}",
"ranger.plugin.yarn.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache",
"ranger.plugin.yarn.policy.pollIntervalMs": "30000"
},
"hbase-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
},
"oozie-site": {
"oozie.service.ELService.ext.functions.coord-action-start": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,\n today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,\n currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,\n lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,\n latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,\n future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,\n dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,\n instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,\n dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"oozie.service.ELService.ext.functions.coord-job-submit-data": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,\n today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,\n currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,\n lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,\n dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,\n instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"oozie.service.ELService.ext.functions.coord-action-create": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,\n today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,\n currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,\n lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,\n latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"oozie.service.ELService.ext.functions.coord-sla-create": "\n instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"local.realm": "HADOOP.TEST",
"oozie.service.JPAService.jdbc.url": "jdbc:mysql://mn01.vagrant/oozie",
"oozie.service.HadoopAccessorService.kerberos.enabled": "true",
"oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
"oozie.authentication.kerberos.name.rules": "RULE:[1:$1@$0](ambari-qa-examplecluster@HADOOP.TEST)s/.*/ambari-qa/\nRULE:[1:$1@$0](hbase-examplecluster@HADOOP.TEST)s/.*/hbase/\nRULE:[1:$1@$0](hdfs-examplecluster@HADOOP.TEST)s/.*/hdfs/\nRULE:[1:$1@$0](spark-examplecluster@HADOOP.TEST)s/.*/spark/\nRULE:[1:$1@$0](.*@HADOOP.TEST)s/@.*//\nRULE:[2:$1@$0](amshbase@HADOOP.TEST)s/.*/ams/\nRULE:[2:$1@$0](amszk@HADOOP.TEST)s/.*/ams/\nRULE:[2:$1@$0](dn@HADOOP.TEST)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@HADOOP.TEST)s/.*/falcon/\nRULE:[2:$1@$0](hbase@HADOOP.TEST)s/.*/hbase/\nRULE:[2:$1@$0](hive@HADOOP.TEST)s/.*/hive/\nRULE:[2:$1@$0](jhs@HADOOP.TEST)s/.*/mapred/\nRULE:[2:$1@$0](nm@HADOOP.TEST)s/.*/yarn/\nRULE:[2:$1@$0](nn@HADOOP.TEST)s/.*/hdfs/\nRULE:[2:$1@$0](oozie@HADOOP.TEST)s/.*/oozie/\nRULE:[2:$1@$0](rangeradmin@HADOOP.TEST)s/.*/ranger/\nRULE:[2:$1@$0](rangerusersync@HADOOP.TEST)s/.*/rangerusersync/\nRULE:[2:$1@$0](rm@HADOOP.TEST)s/.*/yarn/\nRULE:[2:$1@$0](yarn@HADOOP.TEST)s/.*/yarn/\nDEFAULT",
"oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"oozie.action.retry.interval": "30",
"oozie.service.ELService.ext.functions.coord-action-create-inst": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,\n today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,\n currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst,\n lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,\n latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"oozie.authentication.simple.anonymous.allowed": "true",
"oozie.service.HadoopAccessorService.kerberos.principal": "oozie/_HOST@HADOOP.TEST",
"oozie.service.SparkConfigurationService.spark.configurations": "*=spark-conf",
"oozie.service.ELService.ext.functions.coord-job-submit-instances": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,\n today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,\n currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,\n lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo",
"oozie.service.AuthorizationService.authorization.enabled": "true",
"oozie.service.HadoopAccessorService.supported.filesystems": "*",
"oozie.base.url": "http://mn01.vagrant:11000/oozie",
"oozie.service.JPAService.jdbc.password": "bigdata",
"oozie.service.JPAService.jdbc.username": "oozie",
"oozie.service.AuthorizationService.security.enabled": "true",
"oozie.db.schema.name": "oozie",
"oozie.service.HadoopAccessorService.hadoop.configurations": "*={{hadoop_conf_dir}}",
"oozie.service.ELService.ext.functions.coord-sla-submit": "\n instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,\n user=org.apache.oozie.coord.CoordELFunctions#coord_user",
"oozie.service.ELService.ext.functions.workflow": "\n now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,\n today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,\n yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,\n currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,\n lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,\n currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,\n lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,\n formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo",
"oozie.service.JPAService.jdbc.driver": "com.mysql.jdbc.Driver",
"oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
"oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab",
"oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
"oozie.authentication.type": "kerberos",
"oozie.authentication.kerberos.principal": "HTTP/_HOST@HADOOP.TEST"
},
"ssl-server": {
"ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
"ssl.server.keystore.keypassword": "bigdata",
"ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
"ssl.server.keystore.password": "bigdata",
"ssl.server.truststore.password": "bigdata",
"ssl.server.truststore.type": "jks",
"ssl.server.keystore.type": "jks",
"ssl.server.truststore.reload.interval": "10000"
},
"ranger-site": {},
"hbase-env": {
"hbase_pid_dir": "/var/run/hbase",
"phoenix_sql_enabled": "false",
"hbase_user_nproc_limit": "16000",
"hbase_regionserver_xmn_max": "512",
"hbase_regionserver_xmn_ratio": "0.2",
"hbase_java_io_tmpdir": "/tmp",
"hbase_user": "hbase",
"hbase_master_heapsize": "1024m",
"hbase_user_nofile_limit": "32000",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if java_version < 8 %}\nJDK_DEPENDED_OPTS=\"-XX:PermSize=128m -XX:MaxPermSize=128m\"\n{% endif %}\n \n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS\"\nexport PHOENIX_QUERYSERVER_OPTS=\"$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS\"\n{% endif %}\n\n# HBase off-heap MaxDirectMemorySize\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"",
"hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab",
"hbase_regionserver_shutdown_timeout": "30",
"hbase_regionserver_heapsize": "1024m",
"hbase_log_dir": "/var/log/hbase",
"hbase_max_direct_memory_size": "",
"hbase_principal_name": "hbase-examplecluster@HADOOP.TEST"
},
"yarn-log4j": {
"content": "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n# - hadoop.log.dir (Hadoop Log directory)\n# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false\n\n# Appender for viewing information for errors and warnings\nyarn.ewma.cleanupInterval=300\nyarn.ewma.messageAgeLimitSeconds=86400\nyarn.ewma.maxUniqueMessages=250\nlog4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender\nlog4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}\nlog4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}\nlog4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}\n\n# Audit logging for ResourceManager\nrm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false\nlog4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log\nlog4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd\n\n# Audit logging for 
NodeManager\nnm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false\nlog4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log\nlog4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd"
},
"ranger-hbase-policymgr-ssl": {
"xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks",
"xasecure.policymgr.clientssl.truststore.password": "changeit",
"xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks",
"xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}",
"xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
},
"pig-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
}
}
}[root@mn01 data]#
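To inspect one of these Ambari agent command files without scrolling through the raw dump, a small Python 3 sketch like the one below can help. It is only an illustration: the directory path is an assumption based on the prompt above (Ambari agents normally keep these files under /var/lib/ambari-agent/data), and it does not assume any particular top-level keys beyond what json.load returns.

import json
from pathlib import Path

# Assumed location; the session above only shows a working directory named "data".
data_dir = Path("/var/lib/ambari-agent/data")

with open(data_dir / "command-25.json") as fh:
    command = json.load(fh)

# Show the top-level structure of the command file.
for key, value in sorted(command.items()):
    if isinstance(value, dict):
        print(f"{key}: dict with {len(value)} entries")
    elif isinstance(value, list):
        print(f"{key}: list with {len(value)} items")
    else:
        print(f"{key}: {value!r}")

# Dump one configuration section (oozie-site appears in the output above),
# provided the file carries a "configurations" block as agent command files usually do.
oozie_site = command.get("configurations", {}).get("oozie-site", {})
for name, val in sorted(oozie_site.items()):
    print(name, "=", val)

The matching output-25.txt and errors-25.txt files are plain text and can simply be read and printed alongside the parsed command.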