set -o errexit
promptToExit() {
echo "$1 (Y/N)"
read yn
if [[ "$yn" != "y" && "$yn" != "Y" ]]; then
echo "Stopping the process..."
exit;
fi
}
buildBackingDBS() {
terraform -chdir=terraform/ show --json | jq -r '.values.root_module.resources[].values | select (.instance_type != null) | {name: .tags.Name, pub_ip: .public_ip, priv_ip: .private_ip}'| jq -r '["["+.name[17:]+"]", "\n", .pub_ip] | join("")' > run/host_list.conf
terraform -chdir=terraform/ show --json | jq -r '.values.root_module.resources[].values | select (.instance_type != null) | {name: .tags.Name, pub_ip: .public_ip, priv_ip: .private_ip}' > run/hosts.txt
echo -e '\n\n[om:children]\nom1\nom2\nom3\n\n[deployment:children]\ndep1\ndep2\ndep3' >> run/host_list.conf
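# For reference (illustrative values only; names and IPs come from the Terraform
# state, and the .name[17:] slice assumes a fixed tag prefix), the generated
# run/host_list.conf ends up looking roughly like:
#   [om1]
#   54.0.113.10
#   [dep1]
#   54.0.113.11
#   ...
#   [om:children]
#   om1
#   om2
#   om3
#   [deployment:children]
#   dep1
#   dep2
#   dep3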
export ANSIBLE_HOST_KEY_CHECKING=False
ansible -i run/host_list.conf all -u ec2-user --become --become-method sudo --become-user root -a "hostname -f"
ansible -i run/host_list.conf all -u ec2-user --become --become-method sudo --become-user root -a "yum -y update"
ansible -i run/host_list.conf all -u ec2-user --become --become-method sudo --become-user root -a "yum -y install cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl net-snmp openldap openssl xz-libs"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/appdb/"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/bckdb"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/keys/"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "curl -L https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-server-4.4.4-1.el7.x86_64.rpm -o /tmp/mongodb-enterprise-server-4.4.4-1.el7.x86_64.rpm"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "curl -L https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-shell-4.4.4-1.el7.x86_64.rpm -o /tmp/mongodb-enterprise-shell-4.4.4-1.el7.x86_64.rpm"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "rpm -i /tmp/mongodb-enterprise-server-*.rpm"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "rpm -i /tmp/mongodb-enterprise-shell-*.rpm"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/#replication:/replication:\n replSetName: appDB/g' /etc/mongod.conf"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/path:.*/path: \/data\/appdb\/mongod.log/g' /etc/mongod.conf"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/dbPath:.*/dbPath: \/data\/appdb/g' /etc/mongod.conf"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/bindIp:.*/bindIp: 0.0.0.0/g' /etc/mongod.conf"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/#security.*/security: \n authorization: enabled \n keyFile: \/data\/keys\/keyfile /g' /etc/mongod.conf"
ansible -i run/host_list.conf om -u ec2-user --become --become-method sudo --become-user root -a "cat /etc/mongod.conf"
KEY_FILE=$(openssl rand -base64 756)
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo '$KEY_FILE' | tee /data/keys/keyfile"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "chown -R mongod:mongod /data"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "chmod 400 /data/keys/keyfile"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "yum -y install checkpolicy"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'module mongodb_cgroup_memory 1.0; \
require { \
type cgroup_t; \
type mongod_t; \
class dir search; \
class file { getattr open read }; \
} \
#============= mongod_t ============== \
allow mongod_t cgroup_t:dir search; \
allow mongod_t cgroup_t:file { getattr open read };' | tee /tmp/mongodb_cgroup_memory.te"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "checkmodule -M -m -o /tmp/mongodb_cgroup_memory.mod /tmp/mongodb_cgroup_memory.te"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semodule_package -o /tmp/mongodb_cgroup_memory.pp -m /tmp/mongodb_cgroup_memory.mod"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semodule -i /tmp/mongodb_cgroup_memory.pp"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'module mongodb_proc_net 1.0; \
require { \
type proc_net_t; \
type mongod_t; \
class file { open read }; \
} \
\
#============= mongod_t ============== \
allow mongod_t proc_net_t:file { open read };' | tee /tmp/mongodb_proc_net.te"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "checkmodule -M -m -o /tmp/mongodb_proc_net.mod /tmp/mongodb_proc_net.te"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semodule_package -o /tmp/mongodb_proc_net.pp -m /tmp/mongodb_proc_net.mod"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semodule -i /tmp/mongodb_proc_net.pp"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semanage fcontext -a -t mongod_log_t '/data/appdb.*'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "chcon -Rv -u system_u -t mongod_log_t '/data/appdb'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "restorecon -R -v '/data/appdb'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semanage fcontext -a -t mongod_var_lib_t '/data/keys.*'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "chcon -Rv -u system_u -t mongod_var_lib_t '/data/keys'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "restorecon -R -v '/data/keys'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semanage fcontext -a -t mongod_var_lib_t '/data/appdb.*'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "chcon -Rv -u system_u -t mongod_var_lib_t '/data/appdb'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "restorecon -R -v '/data/appdb'"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl enable mongod"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl start mongod"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "ps -fe | grep mongo"
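# Optional sanity check (an assumed addition, not part of the original flow):
# confirm both SELinux modules are loaded and mongod is active on the backing-DB hosts.
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "semodule -l | grep mongodb"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl is-active mongod"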
HOSTS_FILE=$(cat run/hosts.txt | jq -r '[ .priv_ip, .name[17:]+".test.local", .name[17:]] | join("\t")')
ansible -i run/host_list.conf all -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo '$HOSTS_FILE' >> /etc/hosts"
ansible -i run/host_list.conf all -m shell -u ec2-user --become --become-method sudo --become-user root -a "cat /etc/hosts"
ansible -i run/host_list.conf all -u ec2-user --become --become-method sudo --become-user root -a "hostname -f"
OM1=$(cat run/host_list.conf | grep '\[om1\]' -A 1 | tail -n 1)
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $HOST1"
ansible -i run/host_list.conf om2 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $HOST2"
ansible -i run/host_list.conf om3 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $HOST3"
ansible -i run/host_list.conf dep1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST1"
ansible -i run/host_list.conf dep2 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST2"
ansible -i run/host_list.conf dep3 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST3"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $LDAP_HOST"
ansible -i run/host_list.conf all -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostname -f"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "mongo localhost --eval 'rs.initiate({_id: \"appDB\", version: 1, members:[{_id: 0, host:\"$HOST1\"},{_id: 1, host: \"$HOST2\"},{_id: 2, host: \"$HOST3\"}]})'"
sleep 20;
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "mongo localhost:27017/admin --eval 'db.createUser({user: \"'$USER'\", pwd: \"'$PASSWORD'\", roles: [{role: \"root\", db: \"admin\"}]})'"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "mongo --host 'mongodb://localhost:27017/admin?replicaSet=appDB' -u $USER -p $PASSWORD --eval 'db.createUser({user: \"'$OPS_USER'\", pwd: \"'$OPS_PASS'\", roles: [{role: \"readWriteAnyDatabase\", db: \"admin\"}, {role: \"dbAdminAnyDatabase\", db: \"admin\"}, {role: \"clusterAdmin\", db: \"admin\"}]})'"
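# Optional check (an assumed addition, not in the original script): confirm the
# appDB replica set is healthy before installing Ops Manager on top of it.
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "mongo --host 'mongodb://localhost:27017/admin?replicaSet=appDB' -u $USER -p $PASSWORD --eval 'rs.status().members.forEach(function(m) { print(m.name, m.stateStr) })'"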
}
installOpsManager(){
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "curl --silent -L https://downloads.mongodb.com/on-prem-mms/rpm/mongodb-mms-4.4.10.100.20210303T2102Z-1.x86_64.rpm -o /tmp/mongodb-mms-4.4.10.100.20210303T2102Z-1.x86_64.rpm"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "rpm -i /tmp/mongodb-mms-*.rpm"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/mongo.mongoUri.*/mongo.mongoUri=mongodb:\/\/$OPS_USER:$OPS_PASS@$HOST1:27017,$HOST2:27017,$HOST3:27017\/?maxPoolSize=150/g' /opt/mongodb/mms/conf/conf-mms.properties"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'mms.ignoreInitialUiSetup=true
mms.centralUrl=http://$HOST1:8080
mms.fromEmailAddr=$EMAIL
mms.replyToEmailAddr=$EMAIL
mms.adminEmailAddr=$EMAIL
mms.mail.transport=smtps
mms.mail.hostname=mail.example.com
mms.mail.port=465' | tee -a /opt/mongodb/mms/conf/conf-mms.properties"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl start mongodb-mms"
ansible -i run/host_list.conf om1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "state=0; while [ \$state -eq 0 ]; do state=\$(curl -s -IL http://$HOST1:8080 | grep 200 | wc -l); echo \$state; sleep 5; done"
curl --digest --silent --header "Accept: application/json" \
--header "Content-Type: application/json" \
--request POST "http://$OM1:8080/api/public/v1.0/unauth/users?pretty=true&whitelist=127.0.0.1&whitelist=94.214.33.179" \
--data '{ "username": "'$OM_USER_EMAIL'","password": "'$OM_USER_PASS'","firstName": "Dawid","lastName": "Esterhuizen"}' > run/user.json
PRIVATE_KEY=$(cat run/user.json | jq -r '.programmaticApiKey.privateKey')
PUBLIC_KEY=$(cat run/user.json | jq -r '.programmaticApiKey.publicKey')
sleep 10
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--request POST "http://$OM1:8080/api/public/v1.0/admin/apiKeys?pretty=true" \
--data '{ "desc" : "Core Automation Key", "roles" : [ "GLOBAL_OWNER"] }' --output run/global_key.json
GLOBAL_PRIVATE_KEY=$(cat run/global_key.json | jq -r '.privateKey')
GLOBAL_PUBLIC_KEY=$(cat run/global_key.json | jq -r '.publicKey')
GLOBAL_KEY_ID=$(cat run/global_key.json | jq -r '.id')
sleep 10
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://$OM1:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "127.0.0.1/32", "description" : "OM" }'
sleep 10
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://$OM1:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "94.214.33.179/32", "description" : "home" }'
sleep 10
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://$OM1:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "10.0.1.0/24", "description" : "Internal calls" }'
}
createOrganization() {
mongocli iam organization create "$ORG_NAME" -o json > run/organization.json
ORG_ID=$(cat run/organization.json | jq -r '.id')
sleep 10
mongocli iam organizations apikeys create --orgId $ORG_ID --desc "Project Manager" --role ORG_OWNER -o json > run/org_key.json
ORG_KEY_ID=$(cat run/org_key.json | jq -r '.id')
sleep 10
curl --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--include \
--request POST "http://$OM1:8080/api/public/v1.0/orgs/$ORG_ID/apiKeys/$ORG_KEY_ID/whitelist?pretty=true" \
--data '[{ "ipAddress" : "94.214.33.179" }]'
}
createProject() {
mongocli iam project create "$PROJECT_NAME" --orgId $ORG_ID -o json > run/project.json
PROJECT_ID=$(cat run/project.json | jq -r ".id")
sleep 10
ORG_PRIVATE_KEY=$(cat run/org_key.json | jq -r '.privateKey')
ORG_PUBLIC_KEY=$(cat run/org_key.json | jq -r '.publicKey')
ORG_KEY_ID=$(cat run/org_key.json | jq -r '.id')
export MCLI_OPS_MANAGER_URL="http://$OM1:8080/"
export MCLI_ORG_ID=$ORG_ID
export MCLI_PRIVATE_API_KEY=$ORG_PRIVATE_KEY
export MCLI_PUBLIC_API_KEY=$ORG_PUBLIC_KEY
mongocli ops-manager security enable MONGODB-CR --projectId $PROJECT_ID
mongocli ops-manager security enable SCRAM-SHA-256 --projectId $PROJECT_ID
mongocli ops-manager automation get --projectId $PROJECT_ID -o json > run/config.json
sed -i '' 's/"tls": {/"tls": { \
"CAFilePath": "\/data\/keys\/mongodb-ca.crt", /g' run/config.json
mongocli ops-manager automation update --projectId $PROJECT_ID --file run/config.json
mongocli ops-manager agents apiKey create --desc "automatic agent" --projectId $PROJECT_ID -o json > run/automation_api_key.json
}
createCluster() {
ansible -i run/host_list.conf dep1 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST1"
ansible -i run/host_list.conf dep2 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST2"
ansible -i run/host_list.conf dep3 -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostnamectl set-hostname $DEP_HOST3"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostname -f"
OM_URL=http:\\/\\/$HOST1:8080
AGENT_KEY=$(cat run/automation_api_key.json | jq -r '.key' )
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "curl -L $OM_URL/download/agent/automation/mongodb-mms-automation-agent-manager-latest.x86_64.rhel7.rpm -o /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel7.rpm"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "rpm -i /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel7.rpm"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "ls -l /tmp"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/mmsGroupId=.*/mmsGroupId=$PROJECT_ID/g' /etc/mongodb-mms/automation-agent.config"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/mmsApiKey=.*/mmsApiKey=$AGENT_KEY/g' /etc/mongodb-mms/automation-agent.config"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/mmsBaseUrl=.*/mmsBaseUrl=$OM_URL/g' /etc/mongodb-mms/automation-agent.config"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "cat /etc/mongodb-mms/automation-agent.config"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/db"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/keys"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "mkdir -p /data/logs"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "chown mongod:mongod /data"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl restart mongodb-mms-automation-agent.service"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "/sbin/service mongodb-mms-automation-agent start"
sleep 20
HOSTNAME1=$DEP_HOST1
HOSTNAME2=$DEP_HOST2
HOSTNAME3=$DEP_HOST3
HOSTNAMEALT1=$(echo $DEP_HOST1 | cut -d '.' -f 1)
HOSTNAMEALT2=$(echo $DEP_HOST2 | cut -d '.' -f 1)
HOSTNAMEALT3=$(echo $DEP_HOST3 | cut -d '.' -f 1)
hosts=0
echo '{ "results" : [{"hostname": "none"}] }' > run/agent_list.json
# Wait until all three automation agents have registered with the project.
while [ "$hosts" -ne 3 ]; do
echo $hosts
mongocli ops-manager agents list AUTOMATION --projectId $PROJECT_ID -o json > run/agent_list.json
hosts=$(cat run/agent_list.json | jq '.results[].hostname' | grep "$DEP_HOST1" | wc -l)
hosts=$((hosts + $(cat run/agent_list.json | jq '.results[].hostname' | grep "$DEP_HOST2" | wc -l)))
hosts=$((hosts + $(cat run/agent_list.json | jq '.results[].hostname' | grep "$DEP_HOST3" | wc -l)))
sleep 5
done
echo '{
  "name": "{clusterName}",
  "version": "{version}",
  "featureCompatibilityVersion": "{fcv}",
  "processes": [
    {
      "hostname": "{host1}",
      "dbPath": "{dbPath}",
      "logPath": "{logPath}",
      "priority": 1,
      "votes": 1,
      "port": {dbPort},
      "tls" : {
        "enabled": true,
        "mode": "allowTLS",
        "certificateKeyFile": "/data/keys/host.pem",
        "CAFile": "/data/keys/mongodb-ca.crt"
      }
    },
    {
      "hostname": "{host2}",
      "dbPath": "{dbPath}",
      "logPath": "{logPath}",
      "priority": 1,
      "votes": 1,
      "port": {dbPort},
      "tls" : {
        "enabled": true,
        "mode": "allowTLS",
        "certificateKeyFile": "/data/keys/host.pem",
        "CAFile": "/data/keys/mongodb-ca.crt"
      }
    },
    {
      "hostname": "{host3}",
      "dbPath": "{dbPath}",
      "logPath": "{logPath}",
      "priority": 1,
      "votes": 1,
      "port": {dbPort},
      "tls" : {
        "enabled": true,
        "mode": "allowTLS",
        "certificateKeyFile": "/data/keys/host.pem",
        "CAFile": "/data/keys/mongodb-ca.crt"
      }
    }
  ]
}' > run/cluster_template.json
sed -i '' "s/{host1}/$DEP_HOST1/g" run/cluster_template.json | |
sed -i '' "s/{host2}/$DEP_HOST2/g" run/cluster_template.json | |
sed -i '' "s/{host3}/$DEP_HOST3/g" run/cluster_template.json | |
sed -i '' "s/{clusterName}/$CLUSTER_NAME/g" run/cluster_template.json | |
sed -i '' "s/{version}/$VERSION/g" run/cluster_template.json | |
sed -i '' "s/{fcv}/$FCV/g" run/cluster_template.json | |
sed -i '' "s/{dbPath}/$DB_PATH/g" run/cluster_template.json | |
sed -i '' "s/{logPath}/$LOG_PATH/g" run/cluster_template.json | |
sed -i '' "s/{dbPort}/$DB_PORT/g" run/cluster_template.json | |
openssl genrsa -out run/mongodb-ca.key 4096
echo "
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 4096
default_keyfile = run/mongo-cert.key
default_md = sha256
prompt = no
distinguished_name = req_dn
req_extensions = v3_req
x509_extensions = v3_ca
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
[ req_dn ]
countryName = $COUNTRY
stateOrProvinceName = $PROVINCE
localityName = $CITY
organizationName = $COMPANY
organizationalUnitName = $ORGANIZATION_UNIT
emailAddress = $EMAIL
[ v3_ca ]
subjectKeyIdentifier = hash
basicConstraints = critical,CA:true
authorityKeyIdentifier = keyid:always,issuer:always" | tee run/ca.conf
openssl req -new -x509 -days 365 -key run/mongodb-ca.key -out run/mongodb-ca.crt -config run/ca.conf
openssl genrsa -out run/host1.key 4096;
openssl genrsa -out run/host2.key 4096;
openssl genrsa -out run/host3.key 4096;
echo "
[ req ]
default_bits = 4096
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[ req_distinguished_name ]
countryName = $COUNTRY
stateOrProvinceName = $PROVINCE
localityName = $CITY
organizationName = $COMPANY
organizationalUnitName = $ORGANIZATION_UNIT
emailAddress = $EMAIL
commonName = $HOSTNAME1
[ req_ext ]
subjectAltName = @alt_names
[alt_names]
DNS.1 = $HOSTNAME1
DNS.2 = $HOSTNAMEALT1" | tee run/host1.conf
echo "
[ req ]
default_bits = 4096
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[ req_distinguished_name ]
countryName = $COUNTRY
stateOrProvinceName = $PROVINCE
localityName = $CITY
organizationName = $COMPANY
organizationalUnitName = $ORGANIZATION_UNIT
emailAddress = $EMAIL
commonName = $HOSTNAME2
[ req_ext ]
subjectAltName = @alt_names
[alt_names]
DNS.1 = $HOSTNAME2
DNS.2 = $HOSTNAMEALT2" | tee run/host2.conf
echo "
[ req ]
default_bits = 4096
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[ req_distinguished_name ]
countryName = $COUNTRY
stateOrProvinceName = $PROVINCE
localityName = $CITY
organizationName = $COMPANY
organizationalUnitName = $ORGANIZATION_UNIT
emailAddress = $EMAIL
commonName = $HOSTNAME3
[ req_ext ]
subjectAltName = @alt_names
[alt_names]
DNS.1 = $HOSTNAME3
DNS.2 = $HOSTNAMEALT3" | tee run/host3.conf
openssl req -new -sha256 -out run/host1.csr -key run/host1.key -config run/host1.conf
openssl req -new -sha256 -out run/host2.csr -key run/host2.key -config run/host2.conf
openssl req -new -sha256 -out run/host3.csr -key run/host3.key -config run/host3.conf
openssl x509 -req -in run/host1.csr -CA run/mongodb-ca.crt -CAkey run/mongodb-ca.key -CAcreateserial -out run/host1.crt -days 365 -sha256 -extensions req_ext -extfile run/host1.conf
openssl x509 -req -in run/host2.csr -CA run/mongodb-ca.crt -CAkey run/mongodb-ca.key -CAcreateserial -out run/host2.crt -days 365 -sha256 -extensions req_ext -extfile run/host2.conf
openssl x509 -req -in run/host3.csr -CA run/mongodb-ca.crt -CAkey run/mongodb-ca.key -CAcreateserial -out run/host3.crt -days 365 -sha256 -extensions req_ext -extfile run/host3.conf
cat run/host1.crt run/host1.key > run/host1.pem
cat run/host2.crt run/host2.key > run/host2.pem
cat run/host3.crt run/host3.key > run/host3.pem
openssl verify -verbose -CAfile run/mongodb-ca.crt run/host1.pem
openssl verify -verbose -CAfile run/mongodb-ca.crt run/host2.pem
openssl verify -verbose -CAfile run/mongodb-ca.crt run/host3.pem
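# Optional check (an assumed addition): confirm the SANs survived into the signed certificates.
openssl x509 -in run/host1.crt -noout -text | grep -A1 'Subject Alternative Name'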
ansible -i run/host_list.conf deployment -m ansible.builtin.file -u ec2-user --become --become-method sudo --become-user root -a "path=/data state=directory recurse=yes owner=mongod group=mongod"
ansible -i run/host_list.conf dep1 -m ansible.builtin.copy -u ec2-user --become --become-method sudo --become-user root -a "src=run/host1.pem dest=/data/keys/host.pem group=mongod owner=mongod mode=400"
ansible -i run/host_list.conf dep2 -m ansible.builtin.copy -u ec2-user --become --become-method sudo --become-user root -a "src=run/host2.pem dest=/data/keys/host.pem group=mongod owner=mongod mode=400"
ansible -i run/host_list.conf dep3 -m ansible.builtin.copy -u ec2-user --become --become-method sudo --become-user root -a "src=run/host3.pem dest=/data/keys/host.pem group=mongod owner=mongod mode=400"
ansible -i run/host_list.conf deployment -m ansible.builtin.copy -u ec2-user --become --become-method sudo --become-user root -a "src=run/mongodb-ca.crt dest=/data/keys/mongodb-ca.crt group=mongod owner=mongod mode=400"
ansible -i run/host_list.conf om -m ansible.builtin.copy -u ec2-user --become --become-method sudo --become-user root -a "src=run/mongodb-ca.crt dest=/data/keys/mongodb-ca.crt group=mongod owner=mongod mode=400"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "ls -l /data/keys/"
ansible -i run/host_list.conf om -m shell -u ec2-user --become --become-method sudo --become-user root -a "ls -l /data/keys/"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "pkill mongod"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl restart mongodb-mms-automation-agent"
ansible -i run/host_list.conf deployment -m shell -u ec2-user --become --become-method sudo --become-user root -a "hostname -f && openssl x509 -in /data/keys/host.pem -noout -subject"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ls /etc/passwd"
mongocli ops-manager clusters create --projectId $PROJECT_ID --file run/cluster_template.json
mongocli ops-manager automation watch --projectId $PROJECT_ID
mongocli --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST1
mongocli ops-manager automation watch --projectId $PROJECT_ID
mongocli --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST2
mongocli ops-manager automation watch --projectId $PROJECT_ID
mongocli --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST3
mongocli ops-manager automation watch --projectId $PROJECT_ID
}
buildLDAP() {
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "yum -y update"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "yum -y install openldap compat-openldap openldap-clients openldap-servers openldap-servers-sql openldap-devel"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl start slapd"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "systemctl enable slapd"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "netstat -antup | grep -i 389"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "slappasswd -h {SSHA} -s ldppassword" > run/ldap_output
LDAPPASSWORD=$(cat run/ldap_output | tail -n 1)
ansible -i run/host_list.conf all -m shell -u ec2-user --become --become-method sudo --become-user root -a "cat /etc/hosts"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcSuffix
olcSuffix: dc=test,dc=local

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootDN
olcRootDN: cn=ldapadm,dc=test,dc=local

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootPW
olcRootPW: $LDAPPASSWORD' > /tmp/db.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/db.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: olcDatabase={1}monitor,cn=config
changetype: modify
replace: olcAccess
olcAccess: {0}to * by dn.base=\"gidNumber=0+uidNumber=0,cn=peercred,cn=external, cn=auth\" read by dn.base=\"cn=ldapadm,dc=test,dc=local\" read by * none' > /tmp/monitor.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/monitor.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "cp /usr/share/openldap-servers/DB_CONFIG.example /var/lib/ldap/DB_CONFIG"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "chown ldap:ldap /var/lib/ldap/*"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/nis.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: dc=test,dc=local
dc: test
objectClass: top
objectClass: domain

dn: cn=ldapadm,dc=test,dc=local
objectClass: organizationalRole
cn: ldapadm
description: LDAP Manager

dn: ou=People,dc=test,dc=local
objectClass: organizationalUnit
ou: People

dn: ou=Group,dc=test,dc=local
objectClass: organizationalUnit
ou: Group' > /tmp/base.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -x -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -f /tmp/base.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: cn=bind,ou=People,dc=test,dc=local
objectClass: top
objectClass: account
objectClass: posixAccount
objectClass: shadowAccount
cn: bind
uid: bind
uidNumber: 16859
gidNumber: 100
homeDirectory: False
loginShell: False
gecos: bind
userPassword: {crypt}x
shadowLastChange: 0
shadowMax: 0
shadowWarning: 0'> /tmp/bind_user.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -x -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -f /tmp/bind_user.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldappasswd -s password123 -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -x \"cn=bind,ou=People,dc=test,dc=local\""
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "cat /etc/selinux/config"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "reboot"
sleep 60
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: cn=testuser,ou=People,dc=test,dc=local
objectClass: top
objectClass: account
objectClass: posixAccount
objectClass: shadowAccount
cn: testuser
uid: testuser
uidNumber: 16859
gidNumber: 100
homeDirectory: False
loginShell: False
gecos: bind
userPassword: {crypt}x
shadowLastChange: 0
shadowMax: 0
shadowWarning: 0'> /tmp/test_user.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -x -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -f /tmp/test_user.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldappasswd -s password123 -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -x \"cn=testuser,ou=People,dc=test,dc=local\""
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: cn=dbadmin,ou=Group,dc=test,dc=local
objectClass: top
objectClass: posixGroup
gidNumber: 678' > /tmp/dba.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -x -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -f /tmp/dba.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "echo 'dn: cn=dbadmin,ou=Group,dc=test,dc=local
changetype: modify
add: memberuid
memberuid: testuser' > /tmp/group_assign.ldif"
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapadd -x -w ldppassword -D \"cn=ldapadm,dc=test,dc=local\" -f /tmp/group_assign.ldif"
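# Optional verification (an assumed addition): confirm the test user and group resolve over LDAP.
ansible -i run/host_list.conf ldap -m shell -u ec2-user --become --become-method sudo --become-user root -a "ldapsearch -x -b 'dc=test,dc=local' '(|(uid=testuser)(cn=dbadmin))'"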
}
setupLDAP() {
mongocli ops-manager automation get --projectId $PROJECT_ID -o json > run/config.json
sed -i '' 's/ldap": {}/ldap": { "authzQueryTemplate": "", "bindMethod": "simple", "bindQueryPassword": "password123", "bindQueryUser": "cn=bind,ou=People,dc=test,dc=local", "bindSaslMechanisms": "", "servers": "ldap.test.local:389", "transportSecurity": "none", "userToDNMapping": "[ { match : \\"(.+)\\", ldapQuery: \\"DC=test,DC=local??sub?(uid={0})\\" }]", "validateLDAPServerConfig": false }/' run/config.json
cat run/config.json | jq '.auth.deploymentAuthMechanisms += ["PLAIN"]' > run/new_config.json
mongocli ops-manager automation update --projectId $PROJECT_ID --file run/new_config.json
}
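# Optional end-to-end check (an assumed addition, left commented out so the script
# flow is unchanged): once automation converges, authenticate through LDAP.
# mongo --host dep1.test.local --port 27017 --authenticationMechanism PLAIN \
#   --authenticationDatabase '$external' -u testuser -p password123 \
#   --eval 'db.runCommand({connectionStatus: 1})'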
PROJECT_NAME="Project One" | |
USER=admin | |
PASSWORD=Password123 | |
OPS_USER=opsmanager | |
OPS_PASS=Password123 | |
HOST1=om1.test.local | |
HOST2=om2.test.local | |
HOST3=om3.test.local | |
DEP_HOST1=dep1.test.local | |
DEP_HOST2=dep2.test.local | |
DEP_HOST3=dep3.test.local | |
LDAP_HOST=ldap.test.local | |
EMAIL="user@email.com" | |
ORG_NAME="TestLabs" | |
COUNTRY=UK | |
PROVINCE=State1 | |
CITY=City1 | |
COMPANY=WidgetWorld | |
ORGANIZATION_UNIT=UnitA | |
EMAIL=test@test.com | |
CLUSTER_NAME=test_cluster | |
VERSION="4.4.5-ent" | |
FCV="4.4" | |
DB_PATH="\/data\/db\/" | |
LOG_PATH="\/data\/logs\/mongodb.log" | |
DB_PORT=27017 | |
promptToExit "Did you load the ssh key into ssh agent?" | |
promptToExit "Do you want to build the Ops Manager backing Databases?" | |
buildBackingDBS | |
promptToExit "Do you want to install Ops Manager?" | |
installOpsManager | |
export MCLI_OPS_MANAGER_URL="http://$OM1:8080/" | |
export MCLI_ORG_ID=$ORG_ID | |
export MCLI_PRIVATE_API_KEY=$GLOBAL_PRIVATE_KEY | |
export MCLI_PUBLIC_API_KEY=$GLOBAL_PUBLIC_KEY | |
export MCLI_SERVICE="ops-manager" | |
promptToExit "Do you want to create an organization called $ORG_NAME?" | |
createOrganizarion | |
promptToExit "Do you want to create a project called $PROJECT_NAME?" | |
createProject | |
promptToExit "Do you want to create a cluster called $CLUSTER_NAME?" | |
createCluster | |
promptToExit "Do you want to create a test LDAP server?" | |
buildLDAP | |
promptToExit "Do you want to add LDAP to $CLUSTER_NAME?" | |
setupLDAP | |
# ========================================================================================
# Update Repos and install pre-reqs
# ========================================================================================
sudo yum -y update
sudo yum -y install cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl net-snmp openldap openssl xz-libs jq
# ========================================================================================
# Disable Transparent Huge Pages (THP) via systemd - Redhat
# ========================================================================================
echo "[Unit]
Description=Disable Transparent Huge Pages (THP)
DefaultDependencies=no
After=sysinit.target local-fs.target
Before=mongod.service
[Service]
Type=oneshot
ExecStart=/bin/sh -c 'echo never | tee /sys/kernel/mm/transparent_hugepage/enabled > /dev/null'
[Install]
WantedBy=basic.target" | sudo tee /etc/systemd/system/disable-transparent-huge-pages.service
# Redhat
# /sys/kernel/mm/redhat_transparent_hugepage/enabled
sudo systemctl daemon-reload
sudo systemctl start disable-transparent-huge-pages
sudo systemctl enable disable-transparent-huge-pages
# Check
cat /sys/kernel/mm/transparent_hugepage/enabled
# Check in Redhat
# cat /sys/kernel/mm/redhat_transparent_hugepage/enabled
# ========================================================================================
# Disable Transparent Huge Pages (THP) via tuned - Redhat
# ========================================================================================
sudo mkdir /etc/tuned/virtual-guest-no-thp
echo "[main]
include=virtual-guest
[vm]
transparent_hugepages=never" | sudo tee /etc/tuned/virtual-guest-no-thp/tuned.conf
sudo tuned-adm profile virtual-guest-no-thp
# ========================================================================================
# Change the TCP Keepalive to 120
# ========================================================================================
sudo sysctl -w net.ipv4.tcp_keepalive_time=120
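# ========================================================================================
# (Assumed addition) Persist the keepalive setting across reboots
# ========================================================================================
echo 'net.ipv4.tcp_keepalive_time = 120' | sudo tee /etc/sysctl.d/99-mongodb.conf
sudo sysctl -p /etc/sysctl.d/99-mongodb.conf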
# ========================================================================================
# Check noatime for data mount point
# ========================================================================================
mount | grep xfs
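# Example fstab entry with noatime (illustrative only; the device and mount point
# are assumptions, adjust for your host):
# /dev/xvdb  /data  xfs  defaults,noatime  0 0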
# ========================================================================================
# Check noop disk scheduler and change if needed
# ========================================================================================
cat /sys/block/hda/queue/scheduler
echo 'noop' | sudo tee /sys/block/hda/queue/scheduler
# ========================================================================================
# Create required directories
# ========================================================================================
sudo mkdir -p /data/appdb/
sudo mkdir -p /data/bckdb
sudo mkdir -p /data/keys/
# ========================================================================================
# Install using rpms
# ========================================================================================
cd /tmp
curl -OL https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-server-4.4.4-1.el7.x86_64.rpm
curl -OL https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-shell-4.4.4-1.el7.x86_64.rpm
sudo yum -y localinstall mongodb-enterprise-server-*.rpm
sudo yum -y localinstall mongodb-enterprise-shell-*.rpm
rm -rf /tmp/mongodb-enterprise-server-*.rpm
rm -rf /tmp/mongodb-enterprise-shell-*.rpm
# ========================================================================================
# Install using package manager yum
# ========================================================================================
echo "[mongodb-enterprise-4.4]
name=MongoDB Enterprise Repository
baseurl=https://repo.mongodb.com/yum/amazon/2/mongodb-enterprise/4.4/\$basearch/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.4.asc" | sudo tee /etc/yum.repos.d/mongodb-enterprise-4.4.repo
sudo yum update
sudo yum install -y mongodb-enterprise
# ========================================================================================
# Configure mongodb app database
# ========================================================================================
sudo sed -i 's/#replication:/replication:\n replSetName: appDB/g' /etc/mongod.conf
sudo sed -i 's/path:.*/path: \/data\/appdb\/mongod.log/g' /etc/mongod.conf
sudo sed -i 's/dbPath:.*/dbPath: \/data\/appdb/g' /etc/mongod.conf
sudo sed -i 's/bindIp:.*/bindIp: 0.0.0.0/g' /etc/mongod.conf
sudo sed -i 's/#security.*/security: \n authorization: enabled \n keyFile: \/data\/keys\/keyfile /g' /etc/mongod.conf
# TODO: Add the memory constraints for the shared cluster
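# For reference (illustrative), /etc/mongod.conf should now contain roughly:
#   replication:
#     replSetName: appDB
#   storage:
#     dbPath: /data/appdb
#   net:
#     bindIp: 0.0.0.0
#   security:
#     authorization: enabled
#     keyFile: /data/keys/keyfile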
# ========================================================================================
# Generate a new keyfile on the automation machine
# ========================================================================================
openssl rand -base64 756 | sudo tee /data/keys/keyfile
# ========================================================================================
# Load keyfile into hosts
# ========================================================================================
echo '6UdRyW1Hv7xCFVzZwGdsbz4FhzilrGwHs4KEDsnIa5pOY28bbS2wgUtnt5tmbHif
BoLRIvbWurwQYwDYdoYUYQLQJyGjd7YIGnteETjRaMDxUwQFUZ2gF4dBdytbnfVn
BXTnMDGbPKXN42HnFJTfaaaEt088etI7+Wcco/dMyYIKlCSTcANmyfsjfHchmbZo
2gj9JVjhaOmIG5WAdOCRttMJw+q+hV+doMS2FVomVYLYF4KMdFFsK0J1UqD3TspU
HDLpCciuUiZbyCWHqSzlXRvN/1POvrCd0LRb/IZiq9vJnxw2TGAlaAKU9P2lND8t
6EvNuutQFdXyxd/6pgY5kQICrkgRW/chbn8DkKUZ2A81kivXG3pWotRMoiSjIlkA
dqLGzcpVSBC4ckWs/sBz8uv+PEcH/PcQ513qVARvMblVPYuVe7Jt8IB25zN8+g0T
Ihx31f7gaZeR6w4qXm2S1A6QpE48yjc4DxCwGYPs8/96SlHkb4pbV2esjQBQ31H1
Os3BQsGXoo6P9WzKDSTGhdlbPecO/tZibFDt9jULNTKjFTfCyHxqrdsLV1sukCAU
hug5l0MUrifNzGAy5P9T7s8K6OTtU6Yaoy1CTFhHKlrHKWXhUVlsoPWUGUhpW0AH
Nxy1al2PM5x/yjAqQg/vo43ky2yqNDLdzQr+4xyLO9Do7H2VqzVkkSLy7nRMJsdC
fZwMpXQBr518xFIlIeFS+zlmxJYD44u3+SBd2jIt68rTpgVA4mhLizwivmG8KW1W
wYTBmEgE7arc6lXj3nxw37OSYV0T3n+1g8ekynSXNLNM3eV7ws7N1vn/yuARCvJs
fgQopeMfskRAO3oN7xQKIrTpwYsDsH6cFm3Cq3OyGnAighCDSuCgM+2wvAU9Ytup
mkZot3mGGXmua5rrEFwnMUmdPl+bsq9yt7GSqVfcm8M1v/agzbYCtgxZMy12S3BK
/FZz6OhAtSlG3zoB4akfwAqHXgpZajJhxWuYgcIYvEFUchg0' | sudo tee /data/keys/keyfile
# ========================================================================================
# Fix Permissions
# ========================================================================================
sudo chown -R mongod:mongod /data
sudo chmod 400 /data/keys/keyfile
# ========================================================================================
# SELinux !!!!!!
# ========================================================================================
# ========================================================================================
# Whitelist FTDC operations to capture data
# ========================================================================================
sudo yum -y install checkpolicy
cat > mongodb_cgroup_memory.te <<EOF
module mongodb_cgroup_memory 1.0;
require {
    type cgroup_t;
    type mongod_t;
    class dir search;
    class file { getattr open read };
}
#============= mongod_t ==============
allow mongod_t cgroup_t:dir search;
allow mongod_t cgroup_t:file { getattr open read };
EOF
checkmodule -M -m -o mongodb_cgroup_memory.mod mongodb_cgroup_memory.te
semodule_package -o mongodb_cgroup_memory.pp -m mongodb_cgroup_memory.mod
sudo semodule -i mongodb_cgroup_memory.pp
# ========================================================================================
# Whitelist FTDC operations to capture data
# ========================================================================================
cat > mongodb_proc_net.te <<EOF
module mongodb_proc_net 1.0;
require {
    type proc_net_t;
    type mongod_t;
    class file { open read };
}
#============= mongod_t ==============
allow mongod_t proc_net_t:file { open read };
EOF
checkmodule -M -m -o mongodb_proc_net.mod mongodb_proc_net.te
semodule_package -o mongodb_proc_net.pp -m mongodb_proc_net.mod
sudo semodule -i mongodb_proc_net.pp
# ========================================================================================
# Whitelist the directories for use by mongodb
# ========================================================================================
sudo semanage fcontext -a -t mongod_log_t '/data/appdb.*'
sudo chcon -Rv -u system_u -t mongod_log_t '/data/appdb'
sudo restorecon -R -v '/data/appdb'
sudo semanage fcontext -a -t mongod_var_lib_t '/data/keys.*'
sudo chcon -Rv -u system_u -t mongod_var_lib_t '/data/keys'
sudo restorecon -R -v '/data/keys'
sudo semanage fcontext -a -t mongod_var_lib_t '/data/appdb.*'
sudo chcon -Rv -u system_u -t mongod_var_lib_t '/data/appdb'
sudo restorecon -R -v '/data/appdb'
# ========================================================================================
# Enable mongod for the app db and start it
# ========================================================================================
sudo systemctl enable mongod
sudo systemctl start mongod
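# Optional check (an assumed addition): confirm mongod came up cleanly.
sudo systemctl status mongod --no-pager
mongo --quiet --eval 'db.runCommand({ping: 1})'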
# Single Host
terraform show --json | jq -r '.values.root_module.resources[].values | select (.instance_type != null) | .private_dns'
HOST1=ip-10-0-1-19.eu-west-1.compute.internal
HOST2=ip-10-0-1-102.eu-west-1.compute.internal
HOST3=ip-10-0-1-204.eu-west-1.compute.internal
USER=admin
PASSWORD=password
OPS_USER=opsmanager
OPS_PASS=password
# ========================================================================================
# Initialise the appdb database
# ========================================================================================
mongo localhost --eval "rs.initiate({_id: \"appDB\", version: 1, members:[{_id: 0, host:\"$HOST1\"},{_id: 1, host: \"$HOST2\"},{_id: 2, host: \"$HOST3\"}]})"
# ========================================================================================
# Create the root Admin user and Ops Manager User
# ========================================================================================
mongo localhost/admin --eval 'db.createUser({user: "'$USER'", pwd: "'$PASSWORD'", roles: [{role: "root", db: "admin"}]})'
mongo --host "mongodb://localhost:27017/admin?replicaSet=appDB" -u $USER -p $PASSWORD --eval 'db.createUser({user: "'$OPS_USER'", pwd: "'$OPS_PASS'", roles: [{role: "readWriteAnyDatabase", db: "admin"}, {role: "dbAdminAnyDatabase", db: "admin"}, {role: "clusterAdmin", db: "admin"}]})'
# ========================================================================================
# Download and install MongoDB Ops Manager
# ========================================================================================
cd /tmp
curl -OL https://downloads.mongodb.com/on-prem-mms/rpm/mongodb-mms-4.4.10.100.20210303T2102Z-1.x86_64.rpm
sudo rpm -i mongodb-mms-*.rpm
sudo rm /tmp/mongodb-mms-*.rpm
# ========================================================================================
# Configure Ops Manager
# ========================================================================================
EMAIL="desterhuizen@gmail.com"
sudo sed -i "s/mongo.mongoUri.*/mongo.mongoUri=mongodb:\/\/$OPS_USER:$OPS_PASS@$HOST1:27017,$HOST2:27017,$HOST3:27017\/?maxPoolSize=150/g" /opt/mongodb/mms/conf/conf-mms.properties
echo "mms.ignoreInitialUiSetup=true
mms.centralUrl=http://$(hostname):8080
mms.fromEmailAddr=$EMAIL
mms.replyToEmailAddr=$EMAIL
mms.adminEmailAddr=$EMAIL
mms.mail.transport=smtps
mms.mail.hostname=mail.example.com
mms.mail.port=465" | sudo tee -a /opt/mongodb/mms/conf/conf-mms.properties
# ========================================================================================
# Start Ops Manager
# ========================================================================================
sudo systemctl start mongodb-mms
# ========================================================================================
# Check if the host is up
# ========================================================================================
state=0
while [ $state -eq 0 ]; do state=$(curl -s -IL http://localhost:8080 | grep 200 | wc -l); echo $state; sleep 5; done
# ========================================================================================
# Navigate to Ops Manager and register a new user
# ========================================================================================
echo "Navigate to: http://$(hostname):8080/ and register your first user"
echo "Navigate to: http://$(curl -s ifconfig.co):8080/ and register your first user"
# ========================================================================================
# Install jq if needed
# ========================================================================================
sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
sudo yum install jq -y
# ========================================================================================
# Automatically Create a user with API keys
# ========================================================================================
curl --digest --silent --header "Accept: application/json" \
--header "Content-Type: application/json" \
--request POST "http://localhost:8080/api/public/v1.0/unauth/users?pretty=true&whitelist=127.0.0.1&whitelist=<IPADDRESS>" \
--data '{ "username": "<user@email.com>","password": "<password>","firstName": "<FirstName>","lastName": "<lastName>"}' > user.json
# ========================================================================================
# Extract the new user's programmatic API keys
# ========================================================================================
PRIVATE_KEY=$(cat user.json | jq -r '.programmaticApiKey.privateKey')
PUBLIC_KEY=$(cat user.json | jq -r '.programmaticApiKey.publicKey')
# ========================================================================================
# Create a Programmatic Key
# ========================================================================================
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--request POST "http://localhost:8080/api/public/v1.0/admin/apiKeys?pretty=true" \
--data '{ "desc" : "Core Automation Key", "roles" : [ "GLOBAL_OWNER"] }' --output global_key.json
GLOBAL_PRIVATE_KEY=$(cat global_key.json | jq -r '.privateKey')
GLOBAL_PUBLIC_KEY=$(cat global_key.json | jq -r '.publicKey')
GLOBAL_KEY_ID=$(cat global_key.json | jq -r '.id')
# ========================================================================================
# Create a Programmatic Key whitelist
# ========================================================================================
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "127.0.0.1/32", "description" : "OM" }'
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "<IPADDRESS>/32", "description" : "home" }'
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \
--header 'Accept: application/json' \
--header 'Content-Type: application/json' \
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \
--data ' { "cidrBlock" : "10.0.1.0/24", "description" : "Internal calls" }'
# ========================================================================================
# Install mongocli
# ========================================================================================
curl -OL https://github.com/mongodb/mongocli/releases/download/v1.13.0/mongocli_1.13.0_linux_x86_64.rpm
sudo rpm -ivh mongocli_1.13.0_linux_x86_64.rpm
echo "[profile1]
ops_manager_url = \"http://ip-10-0-1-19:8080/\"
org_id = \"$ORG_ID\"
private_api_key = \"$GLOBAL_PRIVATE_KEY\"
public_api_key = \"$GLOBAL_PUBLIC_KEY\"
service = \"ops-manager\"" | tee ~/.config/mongocli.toml
# ========================================================================================
# Automatically create an organization with API keys
# ========================================================================================
mongocli --profile profile1 iam organization create profile1 -o json > organization.json
ORG_ID=$(cat organization.json | jq -r '.id')
# ========================================================================================
# Check project existence
# ========================================================================================
mongocli --profile profile1 iam project describe 607443c4fdb7515362805233
# ========================================================================================
# Create a new Project
# ========================================================================================
PROJECT_NAME="OpsManager Databases"
mongocli --profile profile1 iam project create "$PROJECT_NAME" --orgId $ORG_ID -o json > project.json
PROJECT_ID=$(cat project.json | jq -r ".id")
# ========================================================================================
# Create an org API key
# ========================================================================================
mongocli --profile profile1 iam organizations apikeys create --orgId $ORG_ID --desc "Project Manager" --role ORG_OWNER -o json > org_key.json
ORG_PRIVATE_KEY=$(cat org_key.json | jq -r '.privateKey')
ORG_PUBLIC_KEY=$(cat org_key.json | jq -r '.publicKey')
ORG_KEY_ID=$(cat org_key.json | jq -r '.id')
mongocli --profile profile1 iam organizations apikeys whitelist create --apiKey $ORG_KEY_ID --cidr 10.0.1.0/24 --orgId $ORG_ID -o json > whitelist_result.json | |
# ======================================================================================== | |
# Enable security | |
# ======================================================================================== | |
echo "[profile1_org] | |
ops_manager_url = \"http://ip-10-0-1-19:8080/\" | |
org_id = \"$ORG_ID\" | |
private_api_key = \"$ORG_PRIVATE_KEY\" | |
public_api_key = \"$ORG_PUBLIC_KEY\" | |
service = \"ops-manager\"" | tee -a ~/.config/mongocli.toml | |
mongocli --profile profile1_org ops-manager security enable MONGODB-CR --projectId $PROJECT_ID | |
mongocli --profile profile1_org ops-manager security enable SCRAM-SHA-256 --projectId $PROJECT_ID | |
# ======================================================================================== | |
# Create automation agent API key
# ========================================================================================
mongocli --profile profile1_org ops-manager agents apiKey create --desc "automatic agent" --projectId $PROJECT_ID -o json > automation_api_key.json
OM_URL="<OpsManagerURL>" | |
AGENT_KEY=$(cat automation_api_key.json | jq -r '.key' ) | |
# ======================================================================================== | |
# On target hosts install agents | |
# ======================================================================================== | |
curl -L $OM_URL/download/agent/automation/mongodb-mms-automation-agent-manager-latest.x86_64.rhel7.rpm -o /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel.rpm | |
sudo rpm -U /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel.rpm | |
sudo sed -i "s/mmsGroupId=.*/mmsGroupId=$PROJECT_ID/g" /etc/mongodb-mms/automation-agent.config | |
sudo sed -i "s/mmsApiKey=.*/mmsApiKey=$AGENT_KEY/g" /etc/mongodb-mms/automation-agent.config | |
sudo sed -i "s/mmsBaseUrl=.*/mmsBaseUrl=$OM_URL/g" /etc/mongodb-mms/automation-agent.config | |
# ======================================================================================== | |
# Start and enable the agents | |
# ======================================================================================== | |
sudo systemctl start mongodb-mms-automation-agent.service | |
sudo systemctl enable mongodb-mms-automation-agent.service
# ======================================================================================== | |
# INITIALISE Backup cluster | |
# ======================================================================================== | |
PROJECT_ID=607581eefdb7515362830821  # project id from this run; reuse $PROJECT_ID from above when running end-to-end
mongocli --profile profile1_org ops-manager agents list AUTOMATION --projectId $PROJECT_ID -o json > agent_list.json | |
DEP_HOST1=ip-10-0-1-102.eu-west-1.compute.internal | |
DEP_HOST2=ip-10-0-1-19.eu-west-1.compute.internal | |
DEP_HOST3=ip-10-0-1-204.eu-west-1.compute.internal | |
HOSTNAME1=$DEP_HOST1 | |
HOSTNAME2=$DEP_HOST2 | |
HOSTNAME3=$DEP_HOST3 | |
HOSTNAMEALT1=$DEP_HOST1 | |
HOSTNAMEALT2=$DEP_HOST2 | |
HOSTNAMEALT3=$DEP_HOST3 | |
# ======================================================================================== | |
# Enable Monitoring Agents | |
# ======================================================================================== | |
mongocli --profile profile1_org --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST1 | |
mongocli --profile profile1_org --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST2 | |
# ======================================================================================== | |
# Build Template | |
# ======================================================================================== | |
echo '{ | |
"name": "{clusterName}", | |
"version": "{version}", | |
"featureCompatibilityVersion": "{fcv}", | |
"processes": [ | |
{ | |
"hostname": "{host1}", | |
"dbPath": "{dbPath}", | |
"logPath": "{logPath}", | |
"priority": 1, | |
"votes": 1, | |
"port": {dbPort} | |
}, | |
{ | |
"hostname": "{host2}", | |
"dbPath": "{dbPath}", | |
"logPath": "{logPath}", | |
"priority": 1, | |
"votes": 1, | |
"port": {dbPort} | |
}, | |
{ | |
"hostname": "{host3}", | |
"dbPath": "{dbPath}", | |
"logPath": "{logPath}", | |
"priority": 1, | |
"votes": 1, | |
"port": {dbPort} | |
} | |
] | |
}' > cluster_template.json | |
# ======================================================================================== | |
# Configure template for use with the mongocli | |
# ======================================================================================== | |
CLUSTER_NAME=backup_db | |
VERSION="4.4.5" | |
FCV="4.4" | |
DB_PATH="\/data\/bckdb\/" | |
LOG_PATH="\/data\/bckdb\/mongodb.log" | |
DB_PORT=27018 | |
sed -i "s/{host1}/$DEP_HOST1/g" cluster_template.json | |
sed -i "s/{host2}/$DEP_HOST2/g" cluster_template.json | |
sed -i "s/{host3}/$DEP_HOST3/g" cluster_template.json | |
sed -i "s/{clusterName}/$CLUSTER_NAME/g" cluster_template.json | |
sed -i "s/{version}/$VERSION/g" cluster_template.json | |
sed -i "s/{fcv}/$FCV/g" cluster_template.json | |
sed -i "s/{dbPath}/$DB_PATH/g" cluster_template.json | |
sed -i "s/{logPath}/$LOG_PATH/g" cluster_template.json | |
sed -i "s/{dbPort}/$DB_PORT/g" cluster_template.json | |
# ======================================================================================== | |
# Create the cluster | |
# ======================================================================================== | |
mongocli --profile profile1_org ops-manager clusters create --projectId $PROJECT_ID --file cluster_template.json | |
# ======================================================================================== | |
# Enable TLS in a project
# ======================================================================================== | |
mongocli --profile profile1_org ops-manager automation get --projectId $PROJECT_ID -o json > config.json | |
sed -i 's/"tls": {/"tls": {\n "CAFilePath": "\/data\/keys\/mongodb-ca.crt", /g' config.json | |
mongocli --profile profile1_org ops-manager automation update --projectId $PROJECT_ID --file config.json | |
# ======================================================================================== | |
# Watch until the automation changes have been applied
# ========================================================================================
mongocli --profile profile1_org ops-manager automation watch --projectId $PROJECT_ID
# ======================================================================================== | |
# Enable backup agents | |
# ======================================================================================== | |
mongocli --profile profile1_org ops-manager backups enable $DEP_HOST2 --projectId $PROJECT_ID | |
mongocli --profile profile1_org ops-manager backups enable $DEP_HOST3 --projectId $PROJECT_ID | |
# ======================================================================================== | |
# Enable auth in a project | |
# ======================================================================================== | |
mongocli --profile profile1_org ops-manager security enable MONGODB-CR --projectId $PROJECT_ID | |
mongocli --profile profile1_org ops-manager security enable SCRAM-SHA-256 --projectId $PROJECT_ID | |
# ======================================================================================== | |
# Create a backup user | |
# ======================================================================================== | |
mongocli --profile profile1_org ops-manager dbusers create --projectId $PROJECT_ID --username backup -p password1234 --role readWriteAnyDatabase@admin | |
# ======================================================================================== | |
# Create an oplog store
# ======================================================================================== | |
mongocli --profile profile1 om admin backups oplog create --uri "mongodb://backup:password1234@ip-10-0-1-102.eu-west-1.compute.internal:27018,ip-10-0-1-19.eu-west-1.compute.internal:27018,ip-10-0-1-204.eu-west-1.compute.internal:27018" --name oplog1 --assignment true | |
# ======================================================================================== | |
# Create a filesystem store | |
# ======================================================================================== | |
mongocli --profile profile1 om admin backups filesystem create --storePath /data/snaps/ --name base --wtCompressionSetting GZIP --mmapv1CompressionSetting GZIP | |
# ======================================================================================== | |
# Create a block store | |
# ======================================================================================== | |
mongocli --profile profile1 om admin backups blockstore create --uri "mongodb://backup:password1234@ip-10-0-1-102.eu-west-1.compute.internal:27018,ip-10-0-1-19.eu-west-1.compute.internal:27018,ip-10-0-1-204.eu-west-1.compute.internal:27018" --name blockstore1 --assignment true
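# Optional: confirm the stores registered -- a sketch, assuming this mongocli
# version ships the matching list subcommands:
mongocli --profile profile1 om admin backups oplog list
mongocli --profile profile1 om admin backups filesystem list
mongocli --profile profile1 om admin backups blockstore list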
# ======================================================================================== | |
# Update Repos and install pre-reqs | |
# ======================================================================================== | |
sudo yum -y update | |
sudo yum -y install cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl net-snmp openldap openssl xz-libs jq | |
# ======================================================================================== | |
# Disable Transparent Huge Pages (THP) - RedHat
# ======================================================================================== | |
echo "[Unit] | |
Description=Disable Transparent Huge Pages (THP) | |
DefaultDependencies=no | |
After=sysinit.target local-fs.target | |
Before=mongod.service | |
[Service] | |
Type=oneshot | |
ExecStart=/bin/sh -c 'echo never | tee /sys/kernel/mm/transparent_hugepage/enabled > /dev/null' | |
[Install] | |
WantedBy=basic.target" | sudo tee /etc/systemd/system/disable-transparent-huge-pages.service | |
# Redhat | |
# /sys/kernel/mm/redhat_transparent_hugepage/enabled | |
sudo systemctl daemon-reload | |
sudo systemctl start disable-transparent-huge-pages | |
sudo systemctl enable disable-transparent-huge-pages | |
# Check | |
cat /sys/kernel/mm/transparent_hugepage/enabled | |
# Check in Redhat | |
# cat /sys/kernel/mm/redhat_transparent_hugepage/enabled | |
# ======================================================================================== | |
# Disable Transparent Huge Pages (THP) via tuned - RedHat
# ========================================================================================
sudo mkdir -p /etc/tuned/virtual-guest-no-thp
echo "[main] | |
include=virtual-guest | |
[vm] | |
transparent_hugepages=never" | sudo tee /etc/tuned/virtual-guest-no-thp/tuned.conf | |
sudo tuned-adm profile virtual-guest-no-thp | |
# ======================================================================================== | |
# Change the TCP Keepalive to 120 | |
# ======================================================================================== | |
sudo sysctl -w net.ipv4.tcp_keepalive_time=120 | |
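# The sysctl change above does not survive a reboot; persist it with a drop-in
# (a sketch using the standard sysctl.d mechanism):
echo 'net.ipv4.tcp_keepalive_time = 120' | sudo tee /etc/sysctl.d/99-mongodb-keepalive.conf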
# ======================================================================================== | |
# Check noatime for data mount point | |
# ======================================================================================== | |
mount | grep xfs | |
# ======================================================================================== | |
# Check noop disk scheduler and change if needed | |
# ======================================================================================== | |
cat /sys/block/xvda/queue/scheduler | |
echo 'noop' | sudo tee /sys/block/xvda/queue/scheduler | |
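# To make the scheduler survive a reboot, one option is a udev rule -- a sketch,
# assuming the data disk is xvda as above:
echo 'ACTION=="add|change", KERNEL=="xvda", ATTR{queue/scheduler}="noop"' | sudo tee /etc/udev/rules.d/60-xvda-scheduler.rules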
# ======================================================================================== | |
# Create required directories | |
# ======================================================================================== | |
sudo mkdir -p /data/appdb/ | |
sudo mkdir -p /data/bckdb | |
sudo mkdir -p /data/keys/ | |
# ======================================================================================== | |
# Install using rpms | |
# ======================================================================================== | |
cd /tmp | |
curl -OL https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-server-4.4.6-1.el7.x86_64.rpm | |
curl -OL https://repo.mongodb.com/yum/redhat/7/mongodb-enterprise/4.4/x86_64/RPMS/mongodb-enterprise-shell-4.4.6-1.el7.x86_64.rpm | |
sudo yum -y localinstall mongodb-enterprise-server-*.rpm | |
sudo yum -y localinstall mongodb-enterprise-shell-*.rpm | |
rm -f /tmp/mongodb-enterprise-server-*.rpm
rm -f /tmp/mongodb-enterprise-shell-*.rpm
# ======================================================================================== | |
# Configure mongodb app database | |
# ======================================================================================== | |
sudo sed -i 's/#replication:/replication:\n replSetName: appDB/g' /etc/mongod.conf | |
sudo sed -i 's/path:.*/path: \/data\/appdb\/mongod.log/g' /etc/mongod.conf | |
sudo sed -i 's/dbPath:.*/dbPath: \/data\/appdb/g' /etc/mongod.conf | |
sudo sed -i 's/bindIp:.*/bindIp: 0.0.0.0/g' /etc/mongod.conf | |
sudo sed -i 's/#security.*/security: \n authorization: enabled \n keyFile: \/data\/keys\/keyfile /g' /etc/mongod.conf | |
sudo sed -i 's/# wiredTiger.*/ wiredTiger: \n engineConfig: \n cacheSizeGB: 0.5/g' /etc/mongod.conf | |
# ======================================================================================== | |
# Generate a new keyfile on the automation machine
# ======================================================================================== | |
openssl rand -base64 756 | sudo tee /data/keys/keyfile | |
# ======================================================================================== | |
# Load keyfile into hosts | |
# ======================================================================================== | |
echo '6UdRyW1Hv7xCFVzZwGdsbz4FhzilrGwHs4KEDsnIa5pOY28bbS2wgUtnt5tmbHif | |
BoLRIvbWurwQYwDYdoYUYQLQJyGjd7YIGnteETjRaMDxUwQFUZ2gF4dBdytbnfVn | |
BXTnMDGbPKXN42HnFJTfaaaEt088etI7+Wcco/dMyYIKlCSTcANmyfsjfHchmbZo | |
2gj9JVjhaOmIG5WAdOCRttMJw+q+hV+doMS2FVomVYLYF4KMdFFsK0J1UqD3TspU | |
HDLpCciuUiZbyCWHqSzlXRvN/1POvrCd0LRb/IZiq9vJnxw2TGAlaAKU9P2lND8t | |
6EvNuutQFdXyxd/6pgY5kQICrkgRW/chbn8DkKUZ2A81kivXG3pWotRMoiSjIlkA | |
dqLGzcpVSBC4ckWs/sBz8uv+PEcH/PcQ513qVARvMblVPYuVe7Jt8IB25zN8+g0T | |
Ihx31f7gaZeR6w4qXm2S1A6QpE48yjc4DxCwGYPs8/96SlHkb4pbV2esjQBQ31H1 | |
Os3BQsGXoo6P9WzKDSTGhdlbPecO/tZibFDt9jULNTKjFTfCyHxqrdsLV1sukCAU | |
hug5l0MUrifNzGAy5P9T7s8K6OTtU6Yaoy1CTFhHKlrHKWXhUVlsoPWUGUhpW0AH | |
Nxy1al2PM5x/yjAqQg/vo43ky2yqNDLdzQr+4xyLO9Do7H2VqzVkkSLy7nRMJsdC | |
fZwMpXQBr518xFIlIeFS+zlmxJYD44u3+SBd2jIt68rTpgVA4mhLizwivmG8KW1W | |
wYTBmEgE7arc6lXj3nxw37OSYV0T3n+1g8ekynSXNLNM3eV7ws7N1vn/yuARCvJs | |
fgQopeMfskRAO3oN7xQKIrTpwYsDsH6cFm3Cq3OyGnAighCDSuCgM+2wvAU9Ytup | |
mkZot3mGGXmua5rrEFwnMUmdPl+bsq9yt7GSqVfcm8M1v/agzbYCtgxZMy12S3BK | |
/FZz6OhAtSlG3zoB4akfwAqHXgpZajJhxWuYgcIYvEFUchg0' | sudo tee /data/keys/keyfile | |
# ======================================================================================== | |
# Fix Permissions | |
# ======================================================================================== | |
sudo chown -R mongod:mongod /data | |
sudo chmod 400 /data/keys/keyfile | |
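# Verify ownership and mode before starting mongod:
stat -c '%U:%G %a %n' /data/keys/keyfile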
# ======================================================================================== | |
# SELinux !!!!!! | |
# ======================================================================================== | |
# ======================================================================================== | |
# Allow mongod to read cgroup memory limits (needed for FTDC metrics capture)
# ========================================================================================
sudo yum -y install checkpolicy
cat > mongodb_cgroup_memory.te <<EOF | |
module mongodb_cgroup_memory 1.0; | |
require { | |
type cgroup_t; | |
type mongod_t; | |
class dir search; | |
class file { getattr open read }; | |
} | |
#============= mongod_t ============== | |
allow mongod_t cgroup_t:dir search; | |
allow mongod_t cgroup_t:file { getattr open read }; | |
EOF | |
checkmodule -M -m -o mongodb_cgroup_memory.mod mongodb_cgroup_memory.te | |
semodule_package -o mongodb_cgroup_memory.pp -m mongodb_cgroup_memory.mod | |
sudo semodule -i mongodb_cgroup_memory.pp | |
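# Confirm the policy module is now loaded:
sudo semodule -l | grep mongodb_cgroup_memory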
# ======================================================================================== | |
# Allow mongod to read /proc/net (needed for FTDC metrics capture)
# ======================================================================================== | |
cat > mongodb_proc_net.te <<EOF | |
module mongodb_proc_net 1.0; | |
require { | |
type proc_net_t; | |
type mongod_t; | |
class file { open read }; | |
} | |
#============= mongod_t ============== | |
allow mongod_t proc_net_t:file { open read }; | |
EOF | |
checkmodule -M -m -o mongodb_proc_net.mod mongodb_proc_net.te | |
semodule_package -o mongodb_proc_net.pp -m mongodb_proc_net.mod | |
sudo semodule -i mongodb_proc_net.pp | |
# ======================================================================================== | |
# Whitelist the directories for use by mongodb | |
# ======================================================================================== | |
sudo semanage fcontext -a -t mongod_var_lib_t '/data/keys.*' | |
sudo chcon -Rv -u system_u -t mongod_var_lib_t '/data/keys' | |
sudo restorecon -R -v '/data/keys' | |
# Label the dbPath for mongod, and give the log file its own log context
sudo semanage fcontext -a -t mongod_var_lib_t '/data/appdb.*'
sudo chcon -Rv -u system_u -t mongod_var_lib_t '/data/appdb'
sudo restorecon -R -v '/data/appdb'
sudo semanage fcontext -a -t mongod_log_t '/data/appdb/mongod.log.*'
sudo restorecon -R -v '/data/appdb'
# ======================================================================================== | |
# Enable mongod for the app db and start it | |
# ======================================================================================== | |
sudo systemctl enable mongod | |
sudo systemctl start mongod | |
# List the private DNS names of the provisioned hosts from the Terraform state
terraform -chdir=terraform/ show --json | jq -r '.values.root_module.resources[].values | select (.instance_type != null) | .private_dns'
HOST1=ip-10-0-1-56.eu-central-1.compute.internal | |
HOST2=ip-10-0-1-34.eu-central-1.compute.internal | |
HOST3=ip-10-0-1-41.eu-central-1.compute.internal | |
USERNAME=admin | |
PASSWORD=password | |
OPS_USER=opsmanager | |
OPS_PASS=password | |
# ======================================================================================== | |
# Initialise the appdb database | |
# ======================================================================================== | |
mongo localhost --eval "rs.initiate({_id: \"appDB\", version: 1, members:[{_id: 0, host:\"$HOST1\"},{_id: 1, host: \"$HOST2\"},{_id: 2, host: \"$HOST3\"}]})" | |
# ======================================================================================== | |
# Create the root Admin user and Ops Manager User | |
# ======================================================================================== | |
mongo --host "mongodb://localhost/admin" --eval 'db.createUser({user: "'$USER'", pwd: "'$PASSWORD'", roles: [{role: "root", db: "admin"}]})' | |
sleep 10 | |
mongo --host "mongodb://localhost:27017/admin?replicaSet=appDB" -u $USER -p $PASSWORD --eval 'db.createUser({user: "'$OPS_USER'", pwd: "'$OPS_PASS'", roles: [{role: "readWriteAnyDatabase", db: "admin"}, {role: "dbAdminAnyDatabase", db: "admin"}, {role: "clusterAdmin", db: "admin"}]})' | |
# ======================================================================================== | |
# Download and install MongoDB Ops Manager | |
# ======================================================================================== | |
cd /tmp | |
curl -OL https://downloads.mongodb.com/on-prem-mms/rpm/mongodb-mms-4.4.14.100.20210610T1501Z-1.x86_64.rpm | |
sudo rpm -i mongodb-mms-*.rpm | |
sudo rm /tmp/mongodb-mms-*.rpm | |
# ======================================================================================== | |
# Configure Ops Manager | |
# ======================================================================================== | |
EMAIL="desterhuizen@gmail.com" | |
sudo sed -i "s/mongo.mongoUri.*/mongo.mongoUri=mongodb:\/\/$OPS_USER:$OPS_PASS@$HOST1:27017,$HOST2:27017,$HOST3:27017\/?maxPoolSize=150/g" /opt/mongodb/mms/conf/conf-mms.properties | |
echo "mms.ignoreInitialUiSetup=true | |
mms.centralUrl=http://$(hostname):8080 | |
mms.fromEmailAddr=$EMAIL | |
mms.replyToEmailAddr=$EMAIL | |
mms.adminEmailAddr=$EMAIL | |
mms.mail.transport=smtps | |
mms.mail.hostname=mail.example.com | |
mms.mail.port=465" | sudo tee -a /opt/mongodb/mms/conf/conf-mms.properties | |
# ======================================================================================== | |
# Start Ops Manager | |
# ======================================================================================== | |
sudo systemctl start mongodb-mms | |
# ======================================================================================== | |
# Check if the host is up | |
# ======================================================================================== | |
state=0 | |
while [ $state -eq 0 ]; do state=$(curl -s -IL http://localhost:8080 | grep 200 | wc -l); echo $state; sleep 5; done | |
# ======================================================================================== | |
# Install jq if needed | |
# ======================================================================================== | |
sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm | |
sudo yum install jq -y | |
# ======================================================================================== | |
# Automatically create the first user with API keys
# ======================================================================================== | |
curl --digest --silent --header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request POST "http://localhost:8080/api/public/v1.0/unauth/users?pretty=true&whitelist=127.0.0.1&whitelist=<IP>" \ | |
--data '{ "username": "<username>","password": "<passwd<","firstName": "<name>","lastName": "<lastname>"}' > user.json | |
# ======================================================================================== | |
# Extract the programmatic API keys from the response
# ======================================================================================== | |
PRIVATE_KEY=$(cat user.json | jq -r '.programmaticApiKey.privateKey') | |
PUBLIC_KEY=$(cat user.json | jq -r '.programmaticApiKey.publicKey') | |
# ======================================================================================== | |
# Create a Programmatic Key
# ======================================================================================== | |
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request POST "http://localhost:8080/api/public/v1.0/admin/apiKeys?pretty=true" \ | |
--data '{ "desc" : "Core Automation Key", "roles" : [ "GLOBAL_OWNER"] }' --output global_key.json | |
GLOBAL_PRIVATE_KEY=$(cat global_key.json | jq -r '.privateKey') | |
GLOBAL_PUBLIC_KEY=$(cat global_key.json | jq -r '.publicKey') | |
GLOBAL_KEY_ID=$(cat global_key.json | jq -r '.id') | |
# ======================================================================================== | |
# Create a Programmatic Key whitelist
# ======================================================================================== | |
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \ | |
--data ' { "cidrBlock" : "127.0.0.1/32", "description" : "OM" }' | |
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \ | |
--data ' { "cidrBlock" : "<IP>/32", "description" : "home" }' | |
curl --silent --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://localhost:8080/api/public/v1.0/admin/whitelist?pretty=true" \ | |
--data ' { "cidrBlock" : "10.0.1.0/24", "description" : "Internal calls" }' | |
OM_URL="ip-10-0-1-56:8080" | |
ORG_NAME="Ops Manager" | |
# ======================================================================================== | |
# Automatically create an organization with API keys
# ======================================================================================== | |
curl --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://$OM_URL/api/public/v1.0/orgs" \ | |
--data "{ \"name\" : \"$ORG_NAME\" }" --output organization.json | |
ORG_ID=$(cat organization.json | jq -r '.id') | |
# ======================================================================================== | |
# Create a new Project | |
# ======================================================================================== | |
PROJECT_NAME="OpsManagerDatabases" | |
curl --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://$OM_URL/api/public/v1.0/groups?pretty=true" \ | |
--data "{ \"name\" : \"$PROJECT_NAME\", \"orgId\": \"$ORG_ID\"}" --output om_project.json | |
PROJECT_ID=$(cat om_project.json | jq -r ".id") | |
# ======================================================================================== | |
# Create an org API key
# ======================================================================================== | |
curl --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request POST "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/apiKeys?pretty=true" \ | |
--data '{ "desc" : "New API key for organization automation", "roles": ["ORG_OWNER"] }' \ | |
--output org_key.json | |
ORG_PRIVATE_KEY=$(cat org_key.json | jq -r '.privateKey') | |
ORG_PUBLIC_KEY=$(cat org_key.json | jq -r '.publicKey') | |
ORG_KEY_ID=$(cat org_key.json | jq -r '.id') | |
curl --user "$PUBLIC_KEY:$PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://$OM_URL/api/public/v1.0/orgs/$ORG_ID/apiKeys/$ORG_KEY_ID/whitelist?pretty=true" \ | |
--data '[{"cidrBlock": "10.0.0.0/16"} ]' | |
# ======================================================================================== | |
# Enable security | |
# ======================================================================================== | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--output currentAutomationConfig.json | |
KEY=$(openssl rand -base64 32) | |
AUTO_PASSWORD=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 32) | |
jq '.auth.disabled = false' currentAutomationConfig.json > updated-conf.json | |
temp=$(mktemp) | |
jq '.auth.key = "'$KEY'"' updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq '.auth.autoUser = "mms-automation"' updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq '.auth.authoritativeSet = true' updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq ".auth.autoPwd = \"$AUTO_PASSWORD\"" updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq ".auth.keyfile = \"/var/lib/mongodb-mms-automation/keyfile\"" updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq '.auth.keyfileWindows = "%SystemDrive%\\MMSAutomation\\versions\\keyfile"' updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
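# The temp-file chain above can be collapsed into a single jq pass -- an
# equivalent sketch, producing the same updated-conf.json from the original config:
jq --arg key "$KEY" --arg pwd "$AUTO_PASSWORD" \
   '.auth.disabled = false
    | .auth.key = $key
    | .auth.autoUser = "mms-automation"
    | .auth.authoritativeSet = true
    | .auth.autoPwd = $pwd
    | .auth.keyfile = "/var/lib/mongodb-mms-automation/keyfile"
    | .auth.keyfileWindows = "%SystemDrive%\\MMSAutomation\\versions\\keyfile"' \
   currentAutomationConfig.json > updated-conf.json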
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--data-binary "@./updated-conf.json" | |
# ======================================================================================== | |
# Create automation agent API key
# ======================================================================================== | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request POST "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/agentapikeys?pretty=true" \ | |
--data ' { "desc": "Agent API Key for this project" }' --output autmation_api_key.json | |
OM_URL="ip-10-0-1-56:8080" | |
AGENT_OM_URL="http:\/\/ip-10-0-1-56:8080\/" | |
AGENT_KEY="60d1df751beaa013444bc75e6f215fe574d8db338f1502a368847706" | |
PROJECT_ID="60d1dd7d1beaa013444bc539" | |
# ======================================================================================== | |
# On target hosts install agents | |
# ======================================================================================== | |
curl -L $OM_URL/download/agent/automation/mongodb-mms-automation-agent-manager-latest.x86_64.rhel7.rpm -o /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel.rpm | |
sudo rpm -U /tmp/mongodb-mms-automation-agent-manager-latest.x86_64.rhel.rpm | |
sudo sed -i "s/mmsGroupId=.*/mmsGroupId=$PROJECT_ID/g" /etc/mongodb-mms/automation-agent.config | |
sudo sed -i "s/mmsApiKey=.*/mmsApiKey=$AGENT_KEY/g" /etc/mongodb-mms/automation-agent.config | |
sudo sed -i "s/mmsBaseUrl=.*/mmsBaseUrl=$AGENT_OM_URL/g" /etc/mongodb-mms/automation-agent.config | |
# ======================================================================================== | |
# Start and enable the agents | |
# ======================================================================================== | |
sudo systemctl start mongodb-mms-automation-agent.service | |
sudo systemctl enable mongodb-mms-automation-agent.service | |
# ======================================================================================== | |
# Check hosts can be extended to check individual host name | |
# ======================================================================================== | |
AGENTCOUNT=0 | |
while [[ $AGENTCOUNT -ne 3 ]]; do | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/agents/AUTOMATION?pretty=true" --output agentList.json | |
AGENTCOUNT=$(cat agentList.json | jq ".totalCount") | |
sleep 10 | |
done | |
# ======================================================================================== | |
# Enable Monitoring Agents | |
# ======================================================================================== | |
# TODO: Replace with curl call | |
mongocli --profile <PROFILE>_org --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST1 | |
mongocli --profile <PROFILE>_org --projectId $PROJECT_ID ops-manager monitoring enable $DEP_HOST2 | |
# ======================================================================================== | |
# INITIALISE Backup cluster | |
# ======================================================================================== | |
DEP_HOST1=$HOST1 | |
DEP_HOST2=$HOST2 | |
DEP_HOST3=$HOST3 | |
HOSTNAME1=$DEP_HOST1 | |
HOSTNAME2=$DEP_HOST2 | |
HOSTNAME3=$DEP_HOST3 | |
HOSTNAMEALT1=$DEP_HOST1 | |
HOSTNAMEALT2=$DEP_HOST2 | |
HOSTNAMEALT3=$DEP_HOST3 | |
# ======================================================================================== | |
# Build Template | |
# ======================================================================================== | |
echo '{ "name": "{name1}", "disabled": false, "manualMode": false, "processType": "mongod", "featureCompatibilityVersion": "{fcv}", "version": "{version}", "hostname": "{host1}", "logRotate" : { "sizeThresholdMB" : 1000, "timeThresholdHrs" : 24 }, "args2_6": { "net" : { "port": {port} }, "storage": { "dbPath": "{dbPath}" }, "systemLog": { "path": "{logPath}", "destination": "file" }, "replication": { "replSetName": "{rs_name}" } }, "authSchemaVersion": 5, "numCores": 0 } | |
{ "name": "{name2}", "disabled": false, "manualMode": false, "processType": "mongod", "featureCompatibilityVersion": "{fcv}", "version": "{version}", "hostname": "{host2}", "logRotate" : { "sizeThresholdMB" : 1000, "timeThresholdHrs" : 24 }, "args2_6": { "net" : { "port": {port} }, "storage": { "dbPath": "{dbPath}" }, "systemLog": { "path": "{logPath}", "destination": "file" }, "replication": { "replSetName": "{rs_name}" } }, "authSchemaVersion": 5, "numCores": 0 } | |
{ "name": "{name3}", "disabled": false, "manualMode": false, "processType": "mongod", "featureCompatibilityVersion": "{fcv}", "version": "{version}", "hostname": "{host3}", "logRotate" : { "sizeThresholdMB" : 1000, "timeThresholdHrs" : 24 }, "args2_6": { "net" : { "port": {port} }, "storage": { "dbPath": "{dbPath}" }, "systemLog": { "path": "{logPath}", "destination": "file" }, "replication": { "replSetName": "{rs_name}" } }, "authSchemaVersion": 5, "numCores": 0 } ' > process_template.json | |
echo '{ | |
"_id": "{rs_name}", | |
"members": [ | |
{ "_id":0, "host": "{name1}" }, | |
{ "_id":1, "host": "{name2}" }, | |
{ "_id":2, "host": "{name3}" } | |
], | |
"protocolVersion": "1" | |
}' > rs_template.json | |
# ======================================================================================== | |
# Configure template for use with the mongocli | |
# ======================================================================================== | |
VERSION="4.4.5-ent" | |
FCV="4.4" | |
DB_PATH="\/data\/bckdb\/" | |
LOG_PATH="\/data\/bckdb\/mongodb.log" | |
DB_PORT=27018 | |
RS_NAME="bckdb" | |
NAME1="${RS_NAME}_1" | |
NAME2="${RS_NAME}_2" | |
NAME3="${RS_NAME}_3" | |
sed -i "s/{host1}/$DEP_HOST1/g" process_template.json | |
sed -i "s/{host2}/$DEP_HOST2/g" process_template.json | |
sed -i "s/{host3}/$DEP_HOST3/g" process_template.json | |
sed -i "s/{version}/$VERSION/g" process_template.json | |
sed -i "s/{dbPath}/$DB_PATH/g" process_template.json | |
sed -i "s/{logPath}/$LOG_PATH/g" process_template.json | |
sed -i "s/{port}/$DB_PORT/g" process_template.json | |
sed -i "s/{name1}/$NAME1/g" process_template.json | |
sed -i "s/{name2}/$NAME2/g" process_template.json | |
sed -i "s/{name3}/$NAME3/g" process_template.json | |
sed -i "s/{rs_name}/$RS_NAME/g" process_template.json | |
sed -i "s/{fcv}/$FCV/g" process_template.json | |
sed -i "s/{rs_name}/$RS_NAME/g" rs_template.json | |
sed -i "s/{name1}/$NAME1/g" rs_template.json | |
sed -i "s/{name2}/$NAME2/g" rs_template.json | |
sed -i "s/{name3}/$NAME3/g" rs_template.json | |
# ======================================================================================== | |
# Create the cluster | |
# ======================================================================================== | |
rm -f currentAutomationConfig.json
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--output currentAutomationConfig.json | |
rm -f updated-conf.json
jq '.processes += $process_template' currentAutomationConfig.json --slurpfile process_template process_template.json > updated-conf.json | |
temp=$(mktemp) | |
jq '.replicaSets += $rs_template' updated-conf.json --slurpfile rs_template rs_template.json > "$temp" && mv "$temp" updated-conf.json | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--data-binary "@./updated-conf.json" | |
# ======================================================================================== | |
# Enable tls in a project | |
# ======================================================================================== | |
rm -f currentAutomationConfig.json
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--output currentAutomationConfig.json | |
jq '.tls.CAFilePath = "/data/keys/mongodb-ca.crt"' currentAutomationConfig.json > updated-conf.json
temp=$(mktemp) | |
jq '.processes[].args2_6.net.tls.mode = "requireTLS"' updated-conf.json > "$temp" && mv "$temp" updated-conf.json | |
temp=$(mktemp) | |
jq '.processes[].args2_6.net.tls.certificateKeyFile = "/data/keys/host.pem"' updated-conf.json > "$temp" && mv "$temp" updated-conf.json
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--data-binary "@./updated-conf.json" | |
# ======================================================================================== | |
# Check that the automation changes have been applied
# ======================================================================================== | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationStatus" | |
# ======================================================================================== | |
# List available agent software versions
# ======================================================================================== | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--request GET "http://$OM_URL/api/public/v1.0/softwareComponents/versions?pretty=true" | |
# ======================================================================================== | |
# Enable backup and monitoring agents | |
# ======================================================================================== | |
# Set the monitoring agent password | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig/monitoringAgentConfig?pretty=true" \ | |
--output currentMonitoringConfig.json | |
jq ". | .username = \"mms-automation\" | .password = \"$AUTO_PASSWORD\"" currentMonitoringConfig.json > newMonitoringConfig.json | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig/monitoringAgentConfig?pretty=true" \ | |
--data-binary @newMonitoringConfig.json | |
# Set the backup agent password | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig/backupAgentConfig?pretty=true" \ | |
--output currentBackupConfig.json | |
jq ". | .username = \"mms-automation\" | .password = \"$MONITORING_AGENT_PASSWORD\"" currentBackupConfig.json > newBackupConfig.json | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig/backupAgentConfig?pretty=true" \ | |
--data-binary @newBackupConfig.json | |
# Add all the hosts to both the monitoring and backup version lists.
rm -f currentAutomationConfig.json
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--output currentAutomationConfig.json | |
sed -i "s/monitoringVersions.*/monitoringVersions\": \[\{\"hostname\": \"$DEP_HOST1\" \}, \{\"hostname\": \"$DEP_HOST2\" \}, \{\"hostname\": \"$DEP_HOST3\" \}\],/g" currentAutomationConfig.json | |
sed -i "s/backupVersions.*/backupVersions\": \[\{\"hostname\": \"$DEP_HOST1\" \}, \{\"hostname\": \"$DEP_HOST2\" \}, \{\"hostname\": \"$DEP_HOST3\" \}\],/g" currentAutomationConfig.json | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--data-binary "@./currentAutomationConfig.json" | |
# ======================================================================================== | |
# Create a backup user | |
# ======================================================================================== | |
BACKUP_USER=backup | |
BACKUP_PASS=password1234 | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--request GET "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--output currentAutomationConfig.json | |
sed -i "s/usersWanted.*/usersWanted\": \[\{\"user\": \"$BACKUP_USER\", \"initPwd\": \"$BACKUP_PASS\", \"roles\": \[ \"readWriteAnyDatabase\"\], \"db\": \"admin\" \}\]/g" currentAutomationConfig.json | |
curl --user "$ORG_PUBLIC_KEY:$ORG_PRIVATE_KEY" --digest \ | |
--header "Accept: application/json" \ | |
--header "Content-Type: application/json" \ | |
--request PUT "http://$OM_URL/api/public/v1.0/groups/$PROJECT_ID/automationConfig?pretty=true" \ | |
--data-binary "@./currentAutomationConfig.json" | |
# ======================================================================================== | |
# Create a backup Daemon | |
# ======================================================================================== | |
curl --user "$GLOBAL_PUBLIC_KEY:$GLOBAL_PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request PUT "http://$OM_URL/api/public/v1.0/admin/backup/daemon/configs/$HOST1/?pretty=true" \ | |
--data "{ \"assignmentEnabled\" : true, \"backupJobsEnabled\" : false, \"configured\" : true, \"garbageCollectionEnabled\" : true, \"headDiskType\" : \"SSD\", \"machine\" : { \"headRootDirectory\" : \"/data/backup/\", \"machine\" : \"$HOST1\" }, \"numWorkers\" : 50, \"resourceUsageEnabled\" : true, \"restoreQueryableJobsEnabled\" : true }" | |
# ======================================================================================== | |
# Create a oplog store | |
# ======================================================================================== | |
curl --user "$GLOBAL_PUBLIC_KEY:$GLOBAL_PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://$OM_URL/api/public/v1.0/admin/backup/oplog/mongoConfigs?pretty=true" \ | |
--data "{ \"id\": \"oplog1\" , \"assignmentEnabled\" : true, \"encryptedCredentials\" : false, \"uri\" : \"mongodb://$BACKUP_USER:$BACKUP_PASS@$HOST1:27018,$HOST2:27018,$HOST3:27018\", \"ssl\" : false, \"writeConcern\" : \"MAJORITY\" }" | |
# ======================================================================================== | |
# Create a filesystem store | |
# ======================================================================================== | |
curl --user "$GLOBAL_PUBLIC_KEY:$GLOBAL_PRIVATE_KEY" --digest \ | |
--header 'Accept: application/json' \ | |
--header 'Content-Type: application/json' \ | |
--request POST "http://$OM_URL/api/public/v1.0/admin/backup/snapshot/fileSystemConfigs?pretty=true" \ | |
--data "{ \"assignmentEnabled\" : true, \"loadFactor\" : 50, \"mmapv1CompressionSetting\" : \"NONE\", \"storePath\" : \"/data/backup\", \"wtCompressionSetting\" : \"GZIP\" }" | |