Created September 1, 2017 at 20:10.
Save jwyant/392f0ce8793c52c3c244621119d0bd08 to your computer and use it in GitHub Desktop.
Bootstrap Anaconda 2 4.4.0 and Tensorflow on BDCS-CE 17.3.3-20 Spark 2.1
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/sh
# Bootstrap for BDCS-CE 17.3.3-20 (Spark 2.1): install Anaconda2 4.4.0 and
# TensorFlow cluster-wide, and point Zeppelin's spark2 interpreter at it.
ANACONDAURL="https://repo.continuum.io/archive/Anaconda2-4.4.0-Linux-x86_64.sh"
# Installer file name derived from the download URL (quoted against splitting).
ANACONDASCRIPTFILENAME="$(basename "${ANACONDAURL}")"
#sample bootstrap script listing properties
# Hook run on Ambari server nodes. Currently only logs; extend as needed.
executeOnAmbariNodes() {
    echo "executeOnAmbariNodes ..."
}
# Hook run on Spark Thrift server nodes. Currently only logs; extend as needed.
executeOnSparkThriftServerNodes() {
    echo "executeOnSparkThriftServerNodes ..."
}
# Hook run on HiveServer2 nodes. Currently only logs; extend as needed.
executeOnHive2ServerNodes() {
    echo "executeOnHive2ServerNodes..."
}
# Hook run on compute+storage slave nodes. Currently only logs; extend as needed.
executeOnComputeAndStorageSlaveNodes() {
    echo "executeOnComputeAndStorageSlaveNodes..."
}
# Hook run on compute-only slave nodes. Currently only logs; extend as needed.
executeOnComputeOnlySlaveNodes() {
    echo "executeOnComputeOnlySlaveNodes..."
}
# Hook run on EVERY node: install Anaconda2 into /opt/anaconda2, rewrite the
# Zeppelin 'spark2' interpreter to use the Anaconda python for pyspark, then
# install TensorFlow into the Anaconda environment.
executeOnAllNodes() {
    echo "executeOnAllNodes..."
    # Fetch the installer and run it in batch mode:
    #   -b batch (no prompts), -p install prefix, -f no error if prefix exists.
    # Chained with && so a failed download does not run a stale/partial script.
    wget -O "/tmp/${ANACONDASCRIPTFILENAME}" "${ANACONDAURL}" \
        && sudo bash "/tmp/${ANACONDASCRIPTFILENAME}" -f -b -p /opt/anaconda2
    # Write a Python 2 helper (executed by the Anaconda python below) that
    # patches the Zeppelin interpreter via its local REST API.
    # Quoted delimiter 'EOF' prevents the shell from expanding anything inside.
    cat << 'EOF' > /tmp/setanaconda.py
import requests
import json
# Get our unique spark interpreter ID from the list of interpreters.
r = requests.get('http://127.0.0.1:9995/api/interpreter/setting/')
sparkid = None
sparkbody = None
for item in r.json()['body']:
    if item['name'] == 'spark2':
        sparkid = item['id']
        sparkbody = item
if sparkid is None:
    # Nothing to patch on this node; avoid a NameError below.
    print 'spark2 interpreter not found'
else:
    # Change zeppelin.pyspark.python to the Anaconda interpreter.
    sparkbody['properties']['zeppelin.pyspark.python'] = '/opt/anaconda2/bin/python'
    # Push the modified interpreter setting back.
    r = requests.put('http://127.0.0.1:9995/api/interpreter/setting/' + sparkid,
                     data=json.dumps(sparkbody))
    print r.status_code
EOF
    /opt/anaconda2/bin/python /tmp/setanaconda.py
    # glibc is required by the TensorFlow wheels; sudo matches the sudo'd
    # installer above (/opt/anaconda2 is root-owned after it runs).
    sudo yum install -y glibc
    sudo /opt/anaconda2/bin/conda install -y -q tensorflow
}
# Hook run on master nodes. Currently only logs; extend as needed.
executeOnMasters() {
    echo "executeOnMasters ..."
}
echo 'Hello Bootstrap'
# Log the cluster topology exposed by the BDCS-CE bootstrap environment
# (the get* helper functions are injected by the bootstrap framework).
echo 'Object-store-url:=' $(getBaseObjectStoreUrl)
echo 'Cluster-name:=' $(getClusterName)
echo 'Masters:=' $(getMasterNodes)
echo 'ComputeOnlySlaveNodes:=' $(getComputeOnlySlaveNodes)
echo 'ComputeAndStorageSlaveNodes:=' $(getComputeAndStorageSlaveNodes)
echo 'getAllNodes:=' $(getAllNodes)
echo 'getAmbariServerNodes:=' $(getAmbariServerNodes)
echo 'getSparkThriftServerNodes:=' $(getSparkThriftServerNodes)
echo 'getHive2ServerNodes:=' $(getHive2ServerNodes)
# Example object-store staging (disabled). NOTE: the hdfsCopy destination
# belongs on the same comment line — left uncommented it would execute
# 'file:///tmp/one.sh' as a command and fail.
# hdfsStat $(getBaseObjectStoreUrl)/bdcsce/bootstrap
# rm -f /tmp/one.sh
# hdfsCopy $(getBaseObjectStoreUrl)/bdcsce/bootstrap/bootstrap.sh file:///tmp/one.sh
# Dispatch: compare this host's FQDN against each role's node list and run the
# matching role hook. Comparisons are quoted to survive empty/odd hostnames.
_HOSTNAME=$(hostname -f)
for i in $(getAmbariServerNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnAmbariNodes
    fi
done
for i in $(getSparkThriftServerNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnSparkThriftServerNodes
    fi
done
for i in $(getHive2ServerNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnHive2ServerNodes
    fi
done
for i in $(getMasterNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnMasters
    fi
done
for i in $(getComputeAndStorageSlaveNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnComputeAndStorageSlaveNodes
    fi
done
for i in $(getComputeOnlySlaveNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnComputeOnlySlaveNodes
    fi
done
for i in $(getAllNodes); do
    if [ "${_HOSTNAME}" = "$i" ]; then
        executeOnAllNodes
    fi
done
### No exits please !! (the bootstrap framework must see the script fall through)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.