In this example I am spinning up 2 web servers and 2 file servers using Terraform. During the provision process, Terraform will run a remote-exec
script to bind the 4 new servers to the salt master server.
Substitute your actual SoftLayer username and API key for SL_USERNAME and SL_API_KEY:
export TF_VAR_slusername="SL_USERNAME"
export TF_VAR_slapikey="SL_API_KEY"
You will need to change the default values specified for pub_vlan
and priv_vlan
to specify the vlans you would like the VSIs deployed on. You will also need to change the label
value specified for the sshkey. This needs to match the sshkey of the server you are running terraform from. This sshkey is used to log in to the remote hosts and run the post install script.
# SoftLayer credentials, supplied via the TF_VAR_slusername and
# TF_VAR_slapikey environment variables (no defaults on purpose).
variable "slusername" {}
variable "slapikey" {}
# Operating system image for the provisioned virtual guests
# (used by both the web and file resources).
variable "os" {
  default = "UBUNTU_LATEST_64"
}
# Number of CPU cores each virtual guest will receive.
variable "vm_cores" {
  default = 1
}
# Amount of memory each virtual guest will receive
# (2048 -- presumably megabytes; confirm against the provider docs).
variable "vm_memory" {
  default = 2048
}
# The public vlan to deploy the virtual guests on to.
# Replace the placeholder with your public VLAN ID before running
# terraform apply. The bare, unquoted `xxxxx` was not valid HCL --
# the default must be a quoted string (or a number).
variable "pub_vlan" {
  default = "xxxxx"
}
# The private vlan to deploy the virtual guests on to.
# Replace the placeholder with your private VLAN ID before running
# terraform apply. The bare, unquoted `xxxxx` was not valid HCL --
# the default must be a quoted string (or a number).
variable "priv_vlan" {
  default = "xxxxx"
}
# IBM Cloud provider, authenticated with the SoftLayer credentials
# exported above as TF_VAR_slusername / TF_VAR_slapikey.
provider "ibm" {
softlayer_username = "${var.slusername}"
softlayer_api_key = "${var.slapikey}"
}
# Existing SSH key used to log in to the new hosts for provisioning.
# The label must be changed to match a key already uploaded to the
# account -- the key of the machine terraform is run from.
data "ibm_compute_ssh_key" "sshkey" {
label = "xxxxx"
}
# Web tier: var.node_count identical VSIs named web1..webN.
# NOTE(review): var.node_count, var.domainname and var.datacenter are not
# declared in this file -- they must be defined elsewhere (or supplied via
# TF_VAR_*); confirm before applying.
resource "ibm_compute_vm_instance" "web" {
  count                = "${var.node_count}"
  hostname             = "web${count.index+1}"
  domain               = "${var.domainname}"
  os_reference_code    = "${var.os}"
  datacenter           = "${var.datacenter}"
  network_speed        = 1000
  hourly_billing       = true
  private_network_only = false
  cores                = "${var.vm_cores}"
  memory               = "${var.vm_memory}"
  disks                = [100]
  local_disk           = false
  public_vlan_id       = "${var.pub_vlan}"
  private_vlan_id      = "${var.priv_vlan}"
  ssh_key_ids          = ["${data.ibm_compute_ssh_key.sshkey.id}"]

  # Stage the salt bootstrap script on the new host.
  provisioner "file" {
    source      = "postinstall.sh"
    destination = "/tmp/postinstall.sh"
  }

  # Install salt-minion and send its keys to the master.
  provisioner "remote-exec" {
    inline = [
      "chmod +x /tmp/postinstall.sh",
      "/tmp/postinstall.sh",
    ]
  }

  # Push the custom minion config (postinstall.sh backed up the generated one).
  provisioner "file" {
    source      = "minion"
    destination = "/etc/salt/minion"
  }

  # BUG FIX: the bootstrap starts salt-minion with the generated config, and
  # the custom /etc/salt/minion copied above was never picked up. Restart the
  # daemon so the master/master_finger settings actually take effect.
  provisioner "remote-exec" {
    inline = [
      "service salt-minion restart",
    ]
  }
}
# File tier: var.node_count identical VSIs named file1..fileN.
# NOTE(review): var.node_count, var.domainname and var.datacenter are not
# declared in this file -- they must be defined elsewhere (or supplied via
# TF_VAR_*); confirm before applying.
resource "ibm_compute_vm_instance" "file" {
  count                = "${var.node_count}"
  hostname             = "file${count.index+1}"
  domain               = "${var.domainname}"
  os_reference_code    = "${var.os}"
  datacenter           = "${var.datacenter}"
  network_speed        = 1000
  hourly_billing       = true
  private_network_only = false
  cores                = "${var.vm_cores}"
  memory               = "${var.vm_memory}"
  disks                = [100]
  local_disk           = false
  public_vlan_id       = "${var.pub_vlan}"
  private_vlan_id      = "${var.priv_vlan}"
  ssh_key_ids          = ["${data.ibm_compute_ssh_key.sshkey.id}"]

  # Stage the salt bootstrap script on the new host.
  provisioner "file" {
    source      = "postinstall.sh"
    destination = "/tmp/postinstall.sh"
  }

  # Install salt-minion and send its keys to the master.
  provisioner "remote-exec" {
    inline = [
      "chmod +x /tmp/postinstall.sh",
      "/tmp/postinstall.sh",
    ]
  }

  # Push the custom minion config (postinstall.sh backed up the generated one).
  provisioner "file" {
    source      = "minion"
    destination = "/etc/salt/minion"
  }

  # BUG FIX: the bootstrap starts salt-minion with the generated config, and
  # the custom /etc/salt/minion copied above was never picked up. Restart the
  # daemon so the master/master_finger settings actually take effect.
  provisioner "remote-exec" {
    inline = [
      "service salt-minion restart",
    ]
  }
}
This simple script installs the salt-minion daemon and sends its generated keys to the Salt master server
#!/usr/bin/env bash
# Install salt-minion via the SaltStack bootstrap script and register this
# host with the salt master. Runs as a Terraform remote-exec provisioner.
set -euo pipefail

readonly SALT_MASTER="saltmaster.cde.services"
readonly BOOTSTRAP_URL="https://bootstrap.saltstack.com"

# -f: fail on HTTP errors instead of saving an error page as the script.
curl -fsSL "$BOOTSTRAP_URL" -o install_salt.sh \
  || { printf 'failed to download salt bootstrap\n' >&2; exit 1; }

# -P: allow pip-based installs if needed; -A: address of the salt master.
sh install_salt.sh -P -A "$SALT_MASTER"

# Preserve the generated config; Terraform copies the custom minion file next.
mv /etc/salt/minion /etc/salt/minion.bak
This is the minion file that will be placed on the salt-minion machines. It needs to be adjusted to match your salt-master server name (or IP) and the salt-master key fingerprint. You can get the correct key by running the following command on your salt-master server: salt-key -F SALT_MASTER_NAME.
# Minion configuration pushed to every node by the Terraform file provisioner.
master: saltmaster.cde.services
# Fingerprint of the master's master.pub key (salt-key -F on the master).
# Replace the placeholder with your master's actual fingerprint.
master_finger: 'b8:3d:c0:83:42:b8:e1:f3:f9:db:59:59:d4:44:0f:ca:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx'
For instance on my testing box it looks like this (The key you are interested in is the master.pub
key):
root@saltmaster:~# salt-key -F saltmaster
Local Keys:
master.pem: 73:4c:a4:4c:5d:79:79:d9:72:c8:cd:6a:3f:a1:56:cb:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx
master.pub: b8:3d:c0:83:42:b8:e1:f3:f9:db:59:59:d4:44:0f:ca:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx
Running terraform apply will now provision the 4 new nodes and run the post install script. Once it completes you can run the following on the Salt-master server to verify the minions have had their keys sent:
root@saltmaster:~# salt-key --list all
Accepted Keys:
saltmaster.cde.services
Denied Keys:
Unaccepted Keys:
db1.cde.services
db2.cde.services
web1.cde.services
web2.cde.services
Rejected Keys:
You can now accept the minion keys.
root@saltmaster:/srv/salt# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
db1.cde.services
db2.cde.services
web1.cde.services
web2.cde.services
Proceed? [n/Y] y
Key for minion db1.cde.services accepted.
Key for minion db2.cde.services accepted.
Key for minion web1.cde.services accepted.
Key for minion web2.cde.services accepted.
So in my test I have 2 web servers and 2 db servers. I have set up the following /srv/salt/top.sls
file:
root@saltmaster:/srv/salt# cat /srv/salt/top.sls
base:
'*':
- zsh
'web*':
- nginx
'db*':
- mysql
All hosts get zsh installed. Hosts that begin with web get nginx installed and hosts that begin with db get mysql-server and mysql-client installed.
root@saltmaster:/srv/salt# tree -L 2 /srv/salt
/srv/salt
├── mysql
│ └── init.sls
├── nginx
│ └── init.sls
├── top.sls
└── zsh
└── init.sls
To target all the machines run salt '*' state.apply
snippet of output
----------
ID: mysql-server
Function: pkg.installed
Result: True
Comment: The following packages were installed/updated: mysql-server
Started: 19:57:43.961452
Duration: 32283.098 ms
Changes:
----------
libaio1:
----------
new:
0.3.110-2
old:
libcgi-fast-perl:
----------
new:
1:2.10-1
old:
libcgi-pm-perl:
----------
new:
4.26-1
old:
libencode-locale-perl:
----------
new:
1.05-1
old:
libevent-core-2.0-5:
----------
new:
2.0.21-stable-2ubuntu0.16.04.1
old:
libfcgi-perl:
----------
new:
0.77-1build1
old:
libhtml-parser-perl:
----------
new:
3.72-1
old:
libhtml-tagset-perl:
----------
new:
3.20-2
old:
libhtml-template-perl:
----------
new:
2.95-2
old:
libhttp-date-perl:
----------
new:
6.02-1
old:
libhttp-message-perl:
----------
new:
6.11-1
old:
libio-html-perl:
----------
new:
1.001-1
old:
liblwp-mediatypes-perl:
----------
new:
6.02-1
old:
libtimedate-perl:
----------
new:
2.3000-2
old:
liburi-perl:
----------
new:
1.71-1
old:
mysql-client-5.7:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
mysql-client-core-5.7:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
mysql-common:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
mysql-common-5.6:
----------
new:
1
old:
mysql-server:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
mysql-server-5.7:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
mysql-server-core-5.7:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
timedate:
----------
new:
1
old:
virtual-mysql-client:
----------
new:
1
old:
virtual-mysql-client-core:
----------
new:
1
old:
virtual-mysql-server:
----------
new:
1
old:
virtual-mysql-server-core:
----------
new:
1
old:
----------
ID: mysql-common
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 19:58:16.256561
Duration: 1363.267 ms
Changes:
----------
ID: mysql-client
Function: pkg.installed
Result: True
Comment: The following packages were installed/updated: mysql-client
Started: 19:58:17.624257
Duration: 2444.284 ms
Changes:
----------
mysql-client:
----------
new:
5.7.20-0ubuntu0.16.04.1
old:
Summary for db2.cde.services
------------
Succeeded: 4 (changed=3)
Failed: 0
------------
Total states run: 4
Total run time: 49.533 s