Vagrant Pivotal HD 1.1.x Installation
#!/bin/bash
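# Post-install workaround, executed on the Admin (pcc) node: alter the gphdmgr 'app.name'
# column to TEXT (presumably to lift a length restriction on application names).
# The same statement is run by the Vagrantfile's pcc provisioner below.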
psql -h localhost -p 10432 --username postgres -d gphdmgr -c "ALTER TABLE app ALTER name TYPE text"
#!/bin/bash
# All configuration and installation steps applied here follow the PHD installation guide:
#
#
# Note: The default pwd is /home/vagrant.
#
# Note: By default, Vagrant shares your project directory (that is the one with the Vagrantfile)
# to the /vagrant directory in your guest VMs.
#
# Note: 'root' is the default user. You cannot switch users interactively in this script: "$ sudo su - gpadmin" will not work!
# Use the inline syntax instead: "$ su - -c "some command" gpadmin".
# Sets the cluster name to be used in PCC (Pivotal Control Center)
CLUSTER_NAME=PHD_C1
# List of Hadoop services to be deployed with this installation.
# Note: Hive is disabled because the phd2 and phd3 VMs are configured with limited memory (see the Vagrantfile)! To enable Hive,
# increase the memory of the VMs to at least 2GB (edit the Vagrantfile) and then add 'hive' to the $SERVICES variable
# (see the commented example below). Alternatively, if you don't have enough physical memory, you can remove one VM (phd3 for example)
# and increase the memory of the remaining VMs. For this you need to remove the phd3 definition from the Vagrantfile and from the $MASTER_AND_SLAVES list.
SERVICES=hdfs,yarn,pig,zookeeper,hbase,gpxf,hawq
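# Example with Hive enabled (assumes the phd VMs have been given at least 2GB of memory in the Vagrantfile):
# SERVICES=hdfs,yarn,pig,zookeeper,hbase,hive,gpxf,hawq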
# Sets the dns name of the VM used as Master node for all Hadoop services (e.g. namenode, hawq master, jobtracker ...)
# Note: The Master node is not the Admin node (where PCC runs). By convention the Admin node is pcc.localdomain.
MASTER_NODE=phd1.localdomain
# By default the HAWQ master is collocated with the other master services.
HAWQ_MASTER=$MASTER_NODE
# List of all Pivotal HD nodes in the cluster (including the master node)
MASTER_AND_SLAVES=$MASTER_NODE,phd2.localdomain,phd3.localdomain
# By default all nodes will be used as Hawq segment hosts. Edit the $HAWQ_SEGMENT_HOSTS variable to change this setup.
HAWQ_SEGMENT_HOSTS=$MASTER_AND_SLAVES
# Client node defaults to the MASTER node
CLIENT_NODE=$MASTER_NODE
# Root password required for creating gpadmin users on the cluster nodes.
# (By default Vagrant creates 'vagrant' root user on every VM. The password is 'vagrant' - used below)
ROOT_PASSWORD=vagrant
# Non-empty password to be used for the gpadmin user. Required by the PHD installation.
GPADMIN_PASSWORD=gpadmin
# Pivotal Control Center (PCC) package name ({PCC_PACKAGE_NAME}.x86_64.tar.gz)
PCC_PACKAGE_NAME=$1
# Pivotal HD (PHD) package name ({PHD_PACKAGE_NAME}.tar.gz)
PHD_PACKAGE_NAME=$2
# HAWQ - Pivotal Advanced Data Service (PADS) package name ({PADS_PACKAGE_NAME}.tar.gz)
PADS_PACKAGE_NAME=$3
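# Example invocation (these base names must match the tarballs placed next to the Vagrantfile;
# they are the same values passed by the Vagrantfile's pcc provisioner below):
#   ./pcc_provision_phd110.sh PCC-2.1.0-460 PHD-1.1.0.0-76 PADS-1.1.3-31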
echo "********************************************************************************"
echo "* Prepare PCC (Pivotal Control Center) Perquisites "
echo "********************************************************************************"
# Install required packages.
yum -y install httpd mod_ssl postgresql postgresql-devel postgresql-server compat-readline5 createrepo sigar nc expect sudo wget
# If missing, try to download the Oracle JDK7 installation binary.
if [ ! -f /vagrant/jdk-7u45-linux-x64.rpm ];
then
cd /vagrant; wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F" "http://download.oracle.com/otn-pub/java/jdk/7u45-b18/jdk-7u45-linux-x64.rpm"; cd ~
fi
# Ensure that all installation packages are available in the same folder where the 'vagrant up' is executed.
[ ! -f /vagrant/jdk-7u45-linux-x64.rpm ] && { echo "Cannot find jdk-7u45-linux-x64.rpm in the vagrant startup directory"; exit 1; }
[ ! -f /vagrant/$PCC_PACKAGE_NAME.x86_64.tar.gz ] && { echo "Cannot find $PCC_PACKAGE_NAME.x86_64.tar.gz in the vagrant startup directory"; exit 1; }
[ ! -f /vagrant/$PHD_PACKAGE_NAME.tar.gz ] && { echo "Cannot find $PHD_PACKAGE_NAME.tar.gz in the vagrant startup directory"; exit 1; }
# <<HAWQ>> If PADS (e.g. HAWQ) is not available locally, download it from the public distribution.
if [ ! -f /vagrant/$PADS_PACKAGE_NAME.tar.gz ];
then
cd /vagrant; wget http://bitcast-a.v1.o1.sjc1.bitgravity.com/greenplum/pivotal-sw/$PADS_PACKAGE_NAME.tar.gz; cd ~
fi
# <</HAWQ>>
# Disable security.
sestatus; chkconfig iptables off; service iptables stop; service iptables status
# Install Oracle Java 7 on the PCC (e.g. Admin) node.
sudo yum -y install /vagrant/jdk-7u45-linux-x64.rpm ; java -version
# (Alternative, kept for reference) Install Oracle Java from the self-extracting .bin distribution instead of the RPM.
# cp /vagrant/jdk-7-ea-bin-b76-linux-x64-12_nov_2009-rpm.bin .; chmod a+x ./jdk-7-ea-bin-b76-linux-x64-12_nov_2009-rpm.bin; sudo ./jdk-7-ea-bin-b76-linux-x64-12_nov_2009-rpm.bin; java -version
echo "********************************************************************************"
echo "* Install PCC (Pivotal Control Center) "
echo "********************************************************************************"
service commander stop
# Copy, uncompress and enter the PCC package folder
tar --no-same-owner -xzvf /vagrant/$PCC_PACKAGE_NAME.x86_64.tar.gz --directory /home/vagrant/; cd /home/vagrant/$PCC_PACKAGE_NAME
# Install PCC as root using root's login shell (Note: will not work without the '-' option)
su - -c "cd /home/vagrant/$PCC_PACKAGE_NAME; ./install" root
echo "********************************************************************************"
echo "* Prepare Hosts for Cluster: FCLUSTER_NAME "
echo "********************************************************************************"
echo "Import PHD & PADS packages into the PCC local yum repository ..."
# (Required) For installing PHD
su - -c "tar -xzf /vagrant/$PHD_PACKAGE_NAME.tar.gz --directory ~; icm_client import -s ./$PHD_PACKAGE_NAME" gpadmin
# <<HAWQ>>
# Import HAWQ packages in the local yum repo
su - -c "tar -xzf /vagrant/$PADS_PACKAGE_NAME.tar.gz --directory ~; icm_client import -s ./$PADS_PACKAGE_NAME" gpadmin
# <</HAWQ>>
# (Optional) Import DataLoader and UUS installation packages
#su - -c "tar -xzf /vagrant/PHDTools-1.1.0.0-97.tar.gz --directory ~; icm_client import -p ./PHDTools-1.1.0.0-97" gpadmin
# Import Java 7 packages in the local yum repo
su - -c "icm_client import -r /vagrant/jdk-7u45-linux-x64.rpm" gpadmin
echo "********************************************************************************"
echo "* Deploy Cluster: $CLUSTER_NAME "
echo "********************************************************************************"
# Cluster is deployed as gpadmin user!
# Create a hostfile (HostFile.txt) that contains the hostnames of all cluster nodes (except pcc) separated by newlines.
# Important: The hostfile should contain all nodes within your cluster EXCEPT the Admin node (e.g. except pcc.localdomain).
su - -c "echo $MASTER_AND_SLAVES | tr , '\n' > /home/gpadmin/HostFile.txt" gpadmin
# Verify that all hosts are prepared for installation
#su - -c "icm_client scanhosts -f ./HostFile.txt" gpadmin
# Pivotal HD Manager deploys clusters using input from the cluster configuration directory. This cluster
# configuration directory contains files that describe the topology and configuration of the cluster and the
# installation procedure.
# Fetch the default Cluster Configuration Templates.
su - -c "icm_client fetch-template -o ~/ClusterConfigDir" gpadmin
# Use the following convention to assign cluster hosts to Hadoop service roles. All changes are
# applied to the ~/ClusterConfigDir/clusterConfig.xml file, generated in the previous step.
# Note: By default HAWQ_MASTER=MASTER_NODE, CLIENT_NODE=MASTER_NODE and HAWQ_SEGMENT_HOSTS=MASTER_AND_SLAVES
# ---------------------------------------------------------------------------------------------------------
# Hosts | Services
# ---------------------------------------------------------------------------------------------------------
# MASTER_NODE | client, namenode, secondarynamenode, yarn-resourcemanager, mapreduce-historyserver,
# | hbase-master,hive-server,hive-metastore,hawq-master,hawq-standbymaster,hawq-segment,
# | gpxf-agent
# |
# MASTER_AND_SLAVES | datanode,yarn-nodemanager,zookeeper-server,hbase-regionserver,hawq-segment,gpxf-agent
# ---------------------------------------------------------------------------------------------------------
# Apply the mapping convention (above) to the default clusterConfig.xml.
sed -i "\
s/<clusterName>.*<\/clusterName>/<clusterName>$CLUSTER_NAME<\/clusterName>/g;\
s/<services>.*<\/services>/<services>$SERVICES<\/services>/g;\
s/<client>.*<\/client>/<client>$CLIENT_NODE<\/client>/g;\
s/<namenode>.*<\/namenode>/<namenode>$MASTER_NODE<\/namenode>/g;\
s/<datanode>.*<\/datanode>/<datanode>$MASTER_AND_SLAVES<\/datanode>/g;\
s/<secondarynamenode>.*<\/secondarynamenode>/<secondarynamenode>$MASTER_NODE<\/secondarynamenode>/g;\
s/<yarn-resourcemanager>.*<\/yarn-resourcemanager>/<yarn-resourcemanager>$MASTER_NODE<\/yarn-resourcemanager>/g;\
s/<yarn-nodemanager>.*<\/yarn-nodemanager>/<yarn-nodemanager>$MASTER_AND_SLAVES<\/yarn-nodemanager>/g;\
s/<mapreduce-historyserver>.*<\/mapreduce-historyserver>/<mapreduce-historyserver>$MASTER_NODE<\/mapreduce-historyserver>/g;\
s/<zookeeper-server>.*<\/zookeeper-server>/<zookeeper-server>$MASTER_AND_SLAVES<\/zookeeper-server>/g;\
s/<hbase-master>.*<\/hbase-master>/<hbase-master>$MASTER_NODE<\/hbase-master>/g;\
s/<hbase-regionserver>.*<\/hbase-regionserver>/<hbase-regionserver>$MASTER_AND_SLAVES<\/hbase-regionserver>/g;\
s/<hive-server>.*<\/hive-server>/<hive-server>$MASTER_NODE<\/hive-server>/g;\
s/<hive-metastore>.*<\/hive-metastore>/<hive-metastore>$MASTER_NODE<\/hive-metastore>/g;\
s/<hawq-master>.*<\/hawq-master>/<hawq-master>$HAWQ_MASTER<\/hawq-master>/g;\
s/<hawq-standbymaster>.*<\/hawq-standbymaster>/<hawq-standbymaster>$HAWQ_MASTER<\/hawq-standbymaster>/g;\
s/<hawq-segment>.*<\/hawq-segment>/<hawq-segment>$HAWQ_SEGMENT_HOSTS<\/hawq-segment>/g;" /home/gpadmin/ClusterConfigDir/clusterConfig.xml
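# With the defaults above, the host-assignment elements of clusterConfig.xml end up as follows
# (illustrative excerpt; the remaining elements are left unchanged):
#   <namenode>phd1.localdomain</namenode>
#   <datanode>phd1.localdomain,phd2.localdomain,phd3.localdomain</datanode>
#   <hawq-master>phd1.localdomain</hawq-master>
#   <hawq-segment>phd1.localdomain,phd2.localdomain,phd3.localdomain</hawq-segment>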
# Use ICM to perform the deploy
#su - -c "icm_client deploy -c ~/ClusterConfigDir -t -i -d -j /vagrant/jdk-7u45-linux-x64.rpm -y /usr/lib/gphd/gphdmgr/hawq_sys_config/" gpadmin
# Set vm.overcommit_memory to 1 to prevent OOM and other VM issues.
sed -i 's/vm.overcommit_memory = 2/vm.overcommit_memory = 1/g' /usr/lib/gphd/gphdmgr/hawq_sys_config/sysctl.conf
# Note: preparehosts expects user inputs like root and gpadmin passwords. The 'expect' tool is used to emulate this user interaction.
cat > /home/gpadmin/deploy_cluster.exp <<EOF
#!/usr/bin/expect -f
set timeout 100
spawn icm_client deploy -c /home/gpadmin/ClusterConfigDir -s -i -d -j /vagrant/jdk-7u45-linux-x64.rpm -y /usr/lib/gphd/gphdmgr/hawq_sys_config/
expect "Please enter the root password for the cluster nodes:"
send -- "$ROOT_PASSWORD\r"
expect "PCC creates a gpadmin user on the newly added cluster nodes (if any). Please enter a non-empty password to be used for the gpadmin user:"
send -- "$GPADMIN_PASSWORD\r"
send -- "\r"
expect eof
EOF
chown gpadmin:gpadmin /home/gpadmin/deploy_cluster.exp; chmod a+x /home/gpadmin/deploy_cluster.exp
# Prepare all PHD hosts and perform the deploy
su - -c "expect -f /home/gpadmin/deploy_cluster.exp" gpadmin
echo "********************************************************************************"
echo "* HAWQ - post deploy configuration "
echo "********************************************************************************"
# <<HAWQ>>
su - -c "echo $HAWQ_SEGMENT_HOSTS | tr , '\n' > /home/gpadmin/HAWQ_Segment_Hosts.txt" gpadmin
su - -c "\
scp /home/gpadmin/HAWQ_Segment_Hosts.txt gpadmin@$HAWQ_MASTER:/home/gpadmin/HAWQ_Segment_Hosts.txt;\
ssh gpadmin@$HAWQ_MASTER 'source /usr/local/hawq/greenplum_path.sh;\
/usr/local/hawq/bin/gpssh-exkeys -f /home/gpadmin/HAWQ_Segment_Hosts.txt -p $GPADMIN_PASSWORD'" gpadmin
# <</HAWQ>>
echo "********************************************************************************"
echo "* Start Cluster: $CLUSTER_NAME "
echo "********************************************************************************"
# Wait until the cluster is completely installed (i.e. no longer in install_progress).
time sleep 60;
su - -c "icm_client list" gpadmin
su - -c "icm_client start -l $CLUSTER_NAME" gpadmin
echo "********************************************************************************"
echo "* Initialise HAWQ "
echo "********************************************************************************"
# <<HAWQ>>
su - -c "ssh gpadmin@$HAWQ_MASTER '/etc/init.d/hawq init'" gpadmin;
# <</HAWQ>>
#!/bin/bash
if [ ! -f ./PHDCE1.1 ];
then
if [ ! -f ./pivotalhd_community_1.1.tar.gz ];
then
wget "http://bitcast-a.v1.o1.sjc1.bitgravity.com/greenplum/pivotal-sw/pivotalhd_community_1.1.tar.gz"
fi
tar -xzf ./pivotalhd_community_1.1.tar.gz
fi
cd PHDCE1.1
if [ ! -f ./gist.tar.gz ];
then
wget "https://gist.github.com/tzolov/9199816/download" -O gist.tar.gz
tar --strip-components=1 -xzf ./gist.tar.gz
fi
vagrant destroy -f
vagrant up --provider $1
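# Usage (the script name below is illustrative, assuming this bootstrap script is saved as bootstrap_phd_ce.sh):
#   ./bootstrap_phd_ce.sh virtualbox        # or: ./bootstrap_phd_ce.sh vmware_fusion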
Copy the Vagrantfile and pcc_provision_phd110.sh into the PHDCE1.1 folder
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# Embedded provisioning script common for all cluster hosts and PCC.
$phd_provision_script = <<SCRIPT
#!/bin/bash
# Install the packages required for all cluster and admin nodes
yum -y install postgresql-devel nc expect ed ntp dmidecode pciutils
# Set timezone and run NTP (set to Europe - Amsterdam time).
/etc/init.d/ntpd stop; mv /etc/localtime /etc/localtime.bak; ln -s /usr/share/zoneinfo/Europe/Amsterdam /etc/localtime; /etc/init.d/ntpd start
cat > /etc/hosts <<EOF
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
10.211.155.100 pcc.localdomain pcc
10.211.155.101 phd1.localdomain phd1
10.211.155.102 phd2.localdomain phd2
10.211.155.103 phd3.localdomain phd3
EOF
SCRIPT
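# Note: the static /etc/hosts entries in the provisioning snippet above must stay in sync
# with the private_network IP addresses assigned to the VMs below.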
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.define :phd1 do |phd1|
phd1.vm.box = "CentOS-6.4-x86_64"
phd1.vm.provider :virtualbox do |v|
v.name = "phd1"
v.customize ["modifyvm", :id, "--memory", "1536"]
end
phd1.vm.provider "vmware_fusion" do |v|
v.name = "phd1"
v.vmx["memsize"] = "1536"
end
phd1.vm.hostname = "phd1.localdomain"
phd1.vm.network :private_network, ip: "10.211.155.101"
phd1.vm.provision :shell, :inline => $phd_provision_script
phd1.vm.provision :shell, :inline => "hostname phd1.localdomain"
end
config.vm.define :phd2 do |phd2|
phd2.vm.box = "CentOS-6.4-x86_64"
phd2.vm.provider :virtualbox do |v|
v.name = "phd2"
v.customize ["modifyvm", :id, "--memory", "1536"]
end
phd2.vm.provider "vmware_fusion" do |v|
v.name = "phd2"
v.vmx["memsize"] = "1536"
end
phd2.vm.hostname = "phd2.localdomain"
phd2.vm.network :private_network, ip: "10.211.155.102"
phd2.vm.provision :shell, :inline => $phd_provision_script
phd2.vm.provision :shell, :inline => "hostname phd2.localdomain"
end
config.vm.define :phd3 do |phd3|
phd3.vm.box = "CentOS-6.4-x86_64"
phd3.vm.provider :virtualbox do |v|
v.name = "phd3"
v.customize ["modifyvm", :id, "--memory", "1536"]
end
phd3.vm.provider "vmware_fusion" do |v|
v.name = "phd3"
v.vmx["memsize"] = "1536"
end
phd3.vm.hostname = "phd3.localdomain"
phd3.vm.network :private_network, ip: "10.211.155.103"
phd3.vm.provision :shell, :inline => $phd_provision_script
phd3.vm.provision :shell, :inline => "hostname phd3.localdomain"
end
config.vm.define :pcc do |pcc|
pcc.vm.box = "CentOS-6.4-x86_64"
pcc.vm.provider :virtualbox do |v|
v.name = "pcc"
v.customize ["modifyvm", :id, "--memory", "1024"]
end
pcc.vm.provider "vmware_fusion" do |v|
v.name = "pcc"
v.vmx["memsize"] = "1024"
end
pcc.vm.hostname = "pcc.localdomain"
pcc.vm.network :private_network, ip: "10.211.155.100"
pcc.vm.network :forwarded_port, guest: 5443, host: 5443
pcc.vm.provision :shell, :inline => $phd_provision_script
pcc.vm.provision "shell" do |s|
s.path = "pcc_provision_phd110.sh"
s.args = ["PCC-2.1.0-460", "PHD-1.1.0.0-76", "PADS-1.1.3-31"]
end
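# Note: the three arguments above are the base names of the installation tarballs
# (PCC-2.1.0-460.x86_64.tar.gz, PHD-1.1.0.0-76.tar.gz, PADS-1.1.3-31.tar.gz) that the
# provisioning script expects to find in (or, for PADS, download to) the shared /vagrant folder.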
pcc.vm.provision :shell, :inline => "hostname pcc.localdomain"
pcc.vm.provision :shell, :inline => "psql -h localhost -p 10432 --username postgres -d gphdmgr -c 'ALTER TABLE app ALTER name TYPE text'"
end
end