@hkropp
Created July 1, 2015 12:13
{
  "Blueprints": {
    "blueprint_name": "vagrant-hdp22-n1",
    "stack_name": "HDP",
    "stack_version": "2.2"
  },
"configurations": [
{
"core-site": {
"hadoop.proxyuser.root.groups": "*",
"hadoop.proxyuser.root.hosts": "*",
"hadoop.proxyuser.hcat.groups": "*",
"hadoop.proxyuser.hcat.hosts": "*"
}
},{
"webhcat-site": {
"webhcat.proxyuser.root.groups": "*",
"webhcat.proxyuser.root.hosts": "*"
}
},{
"hive-site": {
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.threads": "5",
"javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
"javax.jdo.option.ConnectionPassword": "hive123",
"javax.jdo.option.ConnectionURL": "jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true",
"javax.jdo.option.ConnectionUserName": "hive"
}
},{
"hive-env": {
"hive_ambari_database": "MySQL",
"hive_database": "Existing MySQL Database",
"hive_database_type": "mysql"
}
},{
"spark-defaults": {
"properties": {
"spark.driver.extraJavaOptions": "",
"spark.history.kerberos.keytab": "none",
"spark.history.kerberos.principal": "none",
"spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider",
"spark.history.ui.port": "18080",
"spark.yarn.am.extraJavaOptions": "",
"spark.yarn.applicationMaster.waitTries": "10",
"spark.yarn.containerLauncherMaxThreads": "25",
"spark.yarn.driver.memoryOverhead": "384",
"spark.yarn.executor.memoryOverhead": "384",
"spark.yarn.max.executor.failures": "2",
"spark.yarn.preserve.staging.files": "false",
"spark.yarn.queue": "default",
"spark.yarn.scheduler.heartbeat.interval-ms": "5000",
"spark.yarn.submit.file.replication": "1"
}
}
},{
"spark-env": {
"properties": {
"content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n export TEZ_CONF_DIR=/etc/tez/conf\nelse\n export TEZ_CONF_DIR=\nfi",
"spark_group": "spark",
"spark_log_dir": "/var/log/spark",
"spark_pid_dir": "/var/run/spark",
"spark_user": "spark"
}
}
}
],
"host_groups" : [
{
"name" : "master_slave_client_1",
"cardinality" : "1",
"components" : [
{ "name": "NAMENODE" },
{ "name": "SECONDARY_NAMENODE" },
{ "name": "METRICS_COLLECTOR" },
{ "name": "ZOOKEEPER_SERVER" },
{ "name": "HIVE_METASTORE" },
{ "name": "HIVE_SERVER" },
{ "name": "WEBHCAT_SERVER" },
{ "name": "APP_TIMELINE_SERVER" },
{ "name": "SPARK_JOBHISTORYSERVER" },
{ "name": "ZEPPELIN_MASTER" },
{ "name": "DATANODE" },
{ "name": "NODEMANAGER" },
{ "name": "METRICS_MONITOR" },
{ "name": "HDFS_CLIENT" },
{ "name": "SPARK_CLIENT" },
{ "name": "MAPREDUCE2_CLIENT" },
{ "name": "YARN_CLIENT" },
{ "name": "TEZ_CLIENT" },
{ "name": "ZOOKEEPER_CLIENT" },
{ "name": "HIVE_CLIENT"},
{ "name": "HCAT" },
{ "name": "PIG" },
{ "name": "ZOOKEEPER_CLIENT" },
{ "name": "RESOURCEMANAGER" },
{ "name": "HISTORYSERVER" }
]
}
]
}
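
To actually deploy this, the blueprint above is first registered with Ambari (a POST of the blueprint JSON to /api/v1/blueprints/vagrant-hdp22-n1, with the X-Requested-By header that Ambari's REST API requires), and the cluster is then created by POSTing a cluster creation template to /api/v1/clusters/<cluster-name>. The template maps the blueprint's host group to concrete hosts. A minimal sketch for this single-node setup follows; the FQDN n1.example.com and the default_password value are placeholders and have to match your Vagrant box and desired default service password:

{
  "blueprint": "vagrant-hdp22-n1",
  "default_password": "hadoop",
  "host_groups": [
    {
      "name": "master_slave_client_1",
      "hosts": [
        { "fqdn": "n1.example.com" }
      ]
    }
  ]
}

Because the host group's cardinality is "1", exactly one FQDN is listed; Ambari then installs every component of master_slave_client_1 on that host, and default_password is used for any required passwords not set explicitly in the configurations section.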