Setup HDF via blueprints
# Sample script to deploy HDF via blueprint
# - Installs Ambari server/agents
# - Installs the HDF mpack
# - Uses ambari-bootstrap to generate a blueprint based on stack advisor recommendations and starts the cluster install
# - Optionally: installs a KDC, sets up Postgres for Ranger, and allows customization of config properties and the number of NiFi nodes
#
# Usage: su to root and run the command below on a host where CentOS/RHEL has been freshly installed (do NOT run this on the HDP sandbox!). You can customize the behavior by setting env vars, e.g.
# export host_count=3; export install_nifi_on_all_nodes=true; curl -sSL https://gist.github.com/abajwa-hw/ae4125c5154deac6713cdd25d2b83620/raw | sudo -E sh ;
# Note: for a multi-node install, you will first need to install/register agents on the other nodes using the command below (replace <AMBARI_SERVER_HOSTNAME>)
# export ambari_server=<AMBARI_SERVER_HOSTNAME>; curl -sSL https://raw.githubusercontent.com/seanorama/ambari-bootstrap/master/ambari-bootstrap.sh | sudo -E sh ;
# e.g.
# export ambari_server=abajwa-hdf-qe-bp-1.openstacklocal; export ambari_version=2.4.1.0; curl -sSL https://raw.githubusercontent.com/seanorama/ambari-bootstrap/master/ambari-bootstrap.sh | sudo -E sh ;
# See this article for more details: https://community.hortonworks.com/articles/56849/automate-deployment-of-hdf-20-clusters-using-ambar.html
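#
# Another example (hypothetical values) - a 3-node cluster with a reduced service list:
# export host_count=3; export ambari_services="ZOOKEEPER NIFI KAFKA AMBARI_METRICS"; curl -sSL https://gist.github.com/abajwa-hw/ae4125c5154deac6713cdd25d2b83620/raw | sudo -E sh ;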
#clean up any previous setup
rm -rf ~/ambari-bootstrap
rm -f *_payload
rm -rf ~/hdf_ambari_mp
rm -f ~/apache-maven-3.0.5-bin.tar.gz
#remove unneeded repos
if [ -f /etc/yum.repos.d/zfs.repo ]; then
  rm -f /etc/yum.repos.d/zfs.repo
fi
if [ -f /etc/yum.repos.d/lustre.repo ]; then
  rm -f /etc/yum.repos.d/lustre.repo
fi
set -e -x
export install_nifi_on_all_nodes="${install_nifi_on_all_nodes:-true}"
export use_default_configs="${use_default_configs:-true}"
export ambari_services="${ambari_services:-ZOOKEEPER NIFI KAFKA STORM LOGSEARCH AMBARI_METRICS AMBARI_INFRA}"
export cluster_name=${cluster_name:-HDF}
export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-1.8.0-openjdk.x86_64/}
export ranger_user="${ranger_user:-rangeradmin}"
export ranger_pass="${ranger_pass:-BadPass#1}"
export host_count=${host_count:-ask}
export setup_kdc="${setup_kdc:-false}"
export setup_postgres_for_ranger="${setup_postgres_for_ranger:-true}"
export host_os=${host_os:-centos6}
export ambari_stack_version=${ambari_stack_version:-2.1}
export ambari_password=${ambari_password:-BadPass#1} ## For security purposes, when installing on AWS, this password will be overridden with your AWS accountId
export ambari_version=2.5.1.0 ## don't use Ambari 2.5.2.0 for HDF, there is a bug
export hdf_ambari_mpack_url="http://public-repo-1.hortonworks.com/HDF/${host_os}/3.x/updates/3.0.1.1/tars/hdf_ambari_mp/hdf-ambari-mpack-3.0.1.1-5.tar.gz"
#export hdf_ambari_mpack_url="http://public-repo-1.hortonworks.com/HDF/${host_os}/2.x/updates/2.1.0.0/tars/hdf_ambari_mp/hdf-ambari-mpack-2.1.0.0-165.tar.gz"
#export hdf_ambari_mpack_url="http://public-repo-1.hortonworks.com/HDF/${host_os}/2.x/updates/2.0.0.0/tars/hdf_ambari_mp/hdf-ambari-mpack-2.0.0.0-579.tar.gz"
#export ambari_repo="http://public-repo-1.hortonworks.com/ambari/${host_os}/2.x/updates/${ambari_version}/ambari.repo"
#export hdf_repo_url="http://public-repo-1.hortonworks.com/HDF/${host_os}/2.x/updates/2.0.0.0"
#service user for Ambari to start/stop services on boot
export service_user="demokitadmin"
export service_password="BadPass#1"
yum install -y git python-argparse
cd ~
sudo git clone https://github.com/seanorama/ambari-bootstrap.git
export install_ambari_server=true
chmod +x ~/ambari-bootstrap/ambari-bootstrap.sh
~/ambari-bootstrap/ambari-bootstrap.sh
sleep 20
#create demokitadmin user via the Ambari REST API
curl -iv -u admin:admin -H "X-Requested-By: blah" -X POST -d "{\"Users/user_name\":\"${service_user}\",\"Users/password\":\"${service_password}\",\"Users/active\":\"true\",\"Users/admin\":\"true\"}" http://localhost:8080/api/v1/users
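# (Optional) sanity check that the user was created - a standard Ambari REST GET, shown for reference only:
# curl -u admin:admin http://localhost:8080/api/v1/users/${service_user}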
#if running on AWS, fetch the accountId
if [ -f /sys/hypervisor/uuid ] && [ "$(head -c 3 /sys/hypervisor/uuid)" = "ec2" ]; then
  echo "AWS detected, reading accountId..."
  eval $(curl -sSL http://169.254.169.254/latest/dynamic/instance-identity/document \
    | awk -F\" '/:/ {print "export "$2"="$4}')
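  # The instance-identity document returned by the metadata service is JSON containing fields such as
  # accountId, region and instanceId; the awk above turns each "key" : "value" pair into an export,
  # which is how ${accountId} becomes available below.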
  #if accountId is not empty, use it as the new password for the Ambari admin user
  if [ -n "${accountId}" ]; then
    echo "Overriding ambari_password with the AWS accountId..."
    export ambari_password=${accountId}
  fi
else
  echo "Non-AWS host detected. Leaving ambari_password at its default."
fi
#update the admin password
curl -iv -u admin:admin -H "X-Requested-By: blah" -X PUT -d "{ \"Users\": { \"user_name\": \"admin\", \"old_password\": \"admin\", \"password\": \"${ambari_password}\" }}" http://localhost:8080/api/v1/users/admin
ambari-server stop
echo yes | ambari-server install-mpack --mpack=${hdf_ambari_mpack_url} --purge --verbose
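# If the mpack ever needs to be removed (e.g. to retry with a different HDF version), Ambari also ships
# an uninstall command - shown here for reference only (assumes the mpack name is hdf-ambari-mpack):
# ambari-server uninstall-mpack --mpack-name=hdf-ambari-mpack --verbose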
#Optional - modify the stack advisor to recommend installing NiFi on all nodes
if [ "${install_nifi_on_all_nodes}" = true ]; then
  cp /var/lib/ambari-server/resources/stacks/HDF/2.0/services/stack_advisor.py /var/lib/ambari-server/resources/stacks/HDF/2.0/services/stack_advisor.py.bak
  sed -i.bak "s#return \['ZOOKEEPER_SERVER', 'METRICS_COLLECTOR'\]#return \['ZOOKEEPER_SERVER', 'METRICS_COLLECTOR', 'NIFI_MASTER'\]#" /var/lib/ambari-server/resources/stacks/HDF/2.0/services/stack_advisor.py
  sed -i.bak "s#\('ZOOKEEPER_SERVER': {\"min\": 3},\)#\1\n 'NIFI_MASTER': {\"min\": $host_count},#g" /var/lib/ambari-server/resources/stacks/HDF/2.0/services/stack_advisor.py
fi
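# Note: the first sed adds NIFI_MASTER to the component list the stack advisor returns alongside
# ZOOKEEPER_SERVER/METRICS_COLLECTOR, and the second adds a NIFI_MASTER cardinality entry keyed off
# ${host_count}, so the generated blueprint recommends a NiFi node on every host.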
#Optional - set up a KDC using automation from https://gist.github.com/abajwa-hw/f8b83e1c12abb1564531e00836b098fa
if [ "${setup_kdc}" = true ]; then
  echo "Setting up KDC..."
  curl -sSL https://gist.github.com/abajwa-hw/f8b83e1c12abb1564531e00836b098fa/raw | sudo -E sh
fi
#Ranger pre-reqs
if [ "${setup_postgres_for_ranger}" = true ]; then
  echo "Setting up postgres for Ranger..."
  yum install -y postgresql-jdbc*
  chmod 644 /usr/share/java/postgresql-jdbc.jar
  echo "CREATE DATABASE ranger;" | sudo -u postgres psql -U postgres
  echo "CREATE USER ${ranger_user} WITH PASSWORD '${ranger_pass}';" | sudo -u postgres psql -U postgres
  echo "ALTER DATABASE ranger OWNER TO ${ranger_user};" | sudo -u postgres psql -U postgres
  echo "GRANT ALL PRIVILEGES ON DATABASE ranger TO ${ranger_user};" | sudo -u postgres psql -U postgres
  sed -i.bak s/ambari,mapred/${ranger_user},ambari,mapred/g /var/lib/pgsql/data/pg_hba.conf
  grep listen_addresses /var/lib/pgsql/data/postgresql.conf
  #make sure listen_addresses='*' in /var/lib/pgsql/data/postgresql.conf
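  # (Optional) one way to set it - commented out since postgresql.conf layouts vary; review before use:
  # sed -i.bak "s/^#\?listen_addresses.*/listen_addresses = '*'/" /var/lib/pgsql/data/postgresql.conf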
  ambari-server setup --jdbc-db=postgres --jdbc-driver=/usr/share/java/postgresql-jdbc.jar
  service postgresql restart
fi
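# (Optional) quick check that the ranger database was created (only relevant when
# setup_postgres_for_ranger=true) - illustrative only:
# echo "\l" | sudo -u postgres psql -U postgres | grep ranger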
#start Ambari
ambari-server start
sleep 30
cd ~
cd ~/ambari-bootstrap/deploy/
#any config customizations? Write a minimal configuration when using default configs, otherwise apply the custom NiFi settings below
if [ "${use_default_configs}" = true ]; then
  tee configuration-custom.json > /dev/null << EOF
{
  "configurations" : {
    "nifi-ambari-config": {
      "nifi.security.encrypt.configuration.password": "changemeplease"
    }
  }
}
EOF
else
  tee configuration-custom.json > /dev/null << EOF
{
  "configurations" : {
    "nifi-ambari-config": {
      "nifi.content.repository.dir.default": "/nifi/content_repository",
      "nifi.database.dir": "/nifi/database_repository",
      "nifi.flowfile.repository.dir": "/nifi/flowfile_repository",
      "nifi.internal.dir": "/nifi",
      "nifi.provenance.repository.dir.default": "/nifi/provenance_repository",
      "nifi.max_mem": "1g",
      "nifi.node.port": "9092",
      "nifi.node.protocol.port": "9089",
      "nifi.node.ssl.port": "9093",
      "nifi.security.encrypt.configuration.password": "changemeplease"
    },
    "nifi-env": {
      "nifi_user": "mynifiuser",
      "nifi_group": "mynifigroup"
    },
    "nifi-properties": {
      "nifi.security.identity.mapping.pattern.dn": "^CN=(.*?), OU=(.*?)$",
      "nifi.security.identity.mapping.value.dn": "\$1@\$2"
    }
  }
}
EOF
fi
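# (Optional) sanity-check that the generated JSON is well formed before kicking off the deploy.
# This uses the system Python already present on these hosts; purely illustrative:
# python -c "import json; json.load(open('configuration-custom.json'))" && echo "configuration-custom.json is valid JSON"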
echo "Deploying HDF..."
export ambari_stack_name=HDF
./deploy-recommended-cluster.bash
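# (Optional) once the deploy request has been submitted, progress can be followed in the Ambari UI
# (http://<ambari_host>:8080) or via the requests API - e.g. (illustrative only):
# curl -u admin:${ambari_password} http://localhost:8080/api/v1/clusters/${cluster_name}/requests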
#To enable security:
#1. enable SSL: https://community.hortonworks.com/articles/58009/hdf-20-enable-ssl-for-apache-nifi-from-ambari.html
#2. enable Ranger:
#   https://community.hortonworks.com/articles/58769/hdf-20-enable-ranger-authorization-for-hdf-compone.html
#   https://community.hortonworks.com/articles/60001/hdf-20-integrating-secured-nifi-with-secured-range.html
#3. enable Kerberos:
#   https://community.hortonworks.com/articles/60186/hdf-20-use-ambari-to-enable-kerberos-for-hdf-clust-1.html
#   https://community.hortonworks.com/articles/58793/hdf-20-use-ambari-to-enable-kerberos-for-hdf-clust.html
#To reset and start over:
#python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py -s
#ambari-server stop
#ambari-server reset
# ##type yes twice
#ambari-agent stop
#yum remove -y ambari-server ambari-agent
#rm -rf /root/*
#rm -rf /var/lib/ambari-server/resources/host_scripts/nifi-certs
#kdb5_util destroy
# ##type yes