Skip to content

Instantly share code, notes, and snippets.

@HebertCL
Last active December 24, 2022 10:20
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save HebertCL/d63f66565151931c5878a9e5e2516da1 to your computer and use it in GitHub Desktop.
Save HebertCL/d63f66565151931c5878a9e5e2516da1 to your computer and use it in GitHub Desktop.
Issue 5344
# This playbook was meant to be executed after provisioning.
# Initially client side certificates were created during
# image cooking, but soon figured out it would not work in
# the long run. When I changed it to post provision execution
# I found errors that mentioned certificates already existed
# and found the issue described in GitHub.
---
- hosts: all
  become: true
  tasks:
    # One config directory per VPN client.
    - name: Create client config folder
      file:
        path: "/etc/openvpn/client/{{ item }}"
        state: directory
        # Quoted: an unquoted 0755 is parsed as octal int 493 by YAML.
        mode: "0755"
      with_items:
        - hcuellar
        - default

    # Render a minimal .ovpn profile into each client directory.
    - name: Config ovpn client file
      shell:
        cmd: |
          cat >{{ item }}.ovpn <<EOF
          client
          dev tun
          proto udp
          #Server IP and Port
          remote vpn.crabi.com 1194
          resolv-retry infinite
          nobind
          persist-key
          persist-tun
          mute-replay-warnings
          ca ca.crt
          cert {{ item }}.crt
          key {{ item }}.key
          tls-auth ta.key 1
          EOF
      args:
        chdir: "/etc/openvpn/client/{{ item }}"
      with_items:
        - hcuellar
        - default

    # Issue one certificate/key pair per client with easy-rsa 3.
    - name: Create client side certificates
      shell: ./3/easyrsa build-client-full {{ item }} nopass
      args:
        chdir: /etc/openvpn/certs/
      with_items:
        - hcuellar
        - default

    # Collect each client's cert/key from wherever easy-rsa wrote them.
    - name: Copy client certificate and key
      shell: find ./certs/ -name "{{ item }}.*" -exec cp {} /etc/openvpn/client/{{ item }} \;
      args:
        chdir: /etc/openvpn/
      with_items:
        - hcuellar
        - default

    # The shared CA cert and TLS-auth key every profile references.
    - name: Copy server certificates and keys
      shell: cp {{ item }} /etc/openvpn/client/
      with_items:
        - /etc/openvpn/certs/pki/ca.crt
        - /etc/openvpn/certs/pki/ta.key

    # Loosen permissions so the generated bundles can be fetched via SCP.
    - name: Setup permissions to SCP and retrieve certificates
      file:
        path: /etc/openvpn/
        mode: "0755"
# This is my IGM module code. Again, I use null resources for chaining
# capabilities with other small modules like template or autoscaler.
resource "google_compute_instance_group_manager" "gcp_managed_group" {
  # TF 0.12+ references are bare expressions; quoted "resource.name"
  # strings are deprecated and rejected by 0.13+.
  depends_on = [
    null_resource.provision_ready,
  ]

  name               = var.gcp_igm_name
  base_instance_name = var.gcp_igm_instance_name
  zone               = var.gcp_igm_zone

  version {
    name              = var.gcp_igm_version_name
    instance_template = var.gcp_igm_version_template
  }

  auto_healing_policies {
    health_check      = var.gcp_igm_health_check
    initial_delay_sec = var.gcp_igm_initial_delay
  }

  target_pools = [google_compute_target_pool.gcp_target_pool.self_link]
  target_size  = var.gcp_igm_target_size
}
# Define instance target pool
resource "google_compute_target_pool" "gcp_target_pool" {
  # TF 0.12+ references are bare expressions; quoted "resource.name"
  # strings are deprecated and rejected by 0.13+.
  depends_on = [
    null_resource.provision_ready,
  ]

  name = var.gcp_target_pool_name
}
# Re-created whenever the upstream provision_ready signal changes,
# so downstream resources can chain off provisioning completion.
resource "null_resource" "provision_ready" {
  triggers = {
    provision_ready = var.provision_ready
  }
}
# Completion marker: exists only once the IGM and target pool are up,
# so other modules can depend on it as an "autoscaler ready" signal.
resource "null_resource" "autoscaler_complete" {
  # TF 0.12+ references are bare expressions; quoted "resource.name"
  # strings are deprecated and rejected by 0.13+.
  depends_on = [
    null_resource.provision_ready,
    google_compute_instance_group_manager.gcp_managed_group,
    google_compute_target_pool.gcp_target_pool,
  ]
}
# Source required network data from base layer
# (exposes VPC/subnet self links consumed by the modules below).
data "terraform_remote_state" "network" {
  backend = "gcs"

  config = {
    bucket = "tf-admin-state"
    prefix = "base-layer/network/"
  }
}

# Create the necessary firewall route.
# NOTE(review): module sources are absolute filesystem paths — confirm
# they resolve on every machine that runs terraform against this config.
module "pritunl_firewall" {
  source                     = "/modules/google_network/firewall_allow"
  provision_ready            = var.provision_ready
  gcp_firewall_name          = var.pritunl_firewall_name
  gcp_firewall_network       = data.terraform_remote_state.network.outputs.vpc_self_link
  gcp_firewall_allowed       = var.pritunl_firewall_allowed
  gcp_firewall_source_ranges = var.pritunl_firewall_source_ranges
  gcp_firewall_target_tags   = var.pritunl_firewall_target_tags
}

# Create external, static ip.
module "pritunl_ip" {
  source           = "/modules/google_network/compute_address"
  provision_ready  = var.provision_ready
  gcp_address_name = var.gcp_address_name
}

# Create a compute health check for LB.
module "pritunl_health_check" {
  source                    = "/modules/google_compute/health-check"
  provision_ready           = var.provision_ready
  gcp_health_check_name     = var.gcp_health_check_name
  gcp_health_check_tcp_port = var.gcp_health_check_tcp_port
}

# Provision instance template; chained behind the firewall module via
# its completion output rather than an explicit depends_on.
module "pritunl_template" {
  source                          = "/modules/google_compute/instance-template"
  provision_ready                 = module.pritunl_firewall.firewall_allow_complete
  gcp_template_name               = var.gcp_template_name
  gcp_template_project            = var.gcp_project
  gcp_template_startup_script     = file(var.gcp_template_startup_script)
  gcp_template_disk_image         = var.gcp_template_disk_image
  gcp_template_target_tags        = module.pritunl_firewall.firewall_allow_tags
  gcp_template_subnetwork         = data.terraform_remote_state.network.outputs.vpn_self_link
  gcp_template_nat_ip             = module.pritunl_ip.address_ip
  gcp_template_subnetwork_project = var.gcp_project
}

# Provision instance group manager and target pool, chained behind the
# template module's completion output.
module "pritunl_igm" {
  source                   = "/modules/google_compute/instance-group-manager"
  provision_ready          = module.pritunl_template.template_complete
  gcp_igm_version_name     = var.gcp_igm_version_name
  gcp_igm_version_template = module.pritunl_template.template_self_link
  gcp_igm_name             = var.gcp_igm_name
  gcp_igm_zone             = var.gcp_zone
  gcp_igm_instance_name    = var.gcp_igm_instance_name
  gcp_igm_health_check     = module.pritunl_health_check.health_check_self_link
  gcp_target_pool_name     = var.gcp_target_pool_name
}
# Install all packages I need for OpenVPN
# plus sets up certificates and openvpn.conf
---
- hosts: all
  become: true
  tasks:
    # openvpn/easy-rsa live in EPEL on CentOS 7, and a repo added in a
    # yum transaction cannot resolve packages within that same
    # transaction — so epel-release gets its own task first.
    - name: Install EPEL repository
      yum:
        state: present
        name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

    - name: Install VPN and certificate required packages
      yum:
        state: present
        name:
          - openssl
          - openvpn
          - easy-rsa
          - iptables
          - net-tools
          - bridge-utils

    - name: Create directory for certificates
      file:
        path: /etc/openvpn/certs
        state: directory
        mode: '0755'

    - name: Copy easy-rsa to openvpn folder
      shell: cp -R /usr/share/easy-rsa/* /etc/openvpn/certs
      args:
        creates: /etc/openvpn/certs/3

    - name: Initialize PKI
      shell: ./3/easyrsa init-pki
      args:
        chdir: /etc/openvpn/certs
        creates: /etc/openvpn/certs/pki

    # NOTE(review): bash's builtin echo does not expand \n without -e,
    # so the CA prompt is fed the literal characters 'ca\n'. Existing
    # CAs were built this way — confirm before "fixing" it to printf.
    - name: Create OpenVPN CA
      shell: echo "ca\n" | ./3/easyrsa build-ca nopass
      args:
        chdir: /etc/openvpn/certs
        creates: /etc/openvpn/certs/pki/ca.crt

    # easy-rsa 3 writes issued material under pki/ (server.conf below
    # reads certs/pki/issued/server.crt). The old guard pointed at
    # certs/issued/, never matched, so the task re-ran on every play
    # and failed with "certificate already exists".
    - name: Create OpenVPN server certificate
      shell: ./3/easyrsa build-server-full server nopass
      args:
        chdir: /etc/openvpn/certs
        creates: /etc/openvpn/certs/pki/issued/server.crt

    # Same pki/ path fix: gen-dh writes pki/dh.pem (see server.conf).
    - name: Create Diffie Hellman key
      shell: ./3/easyrsa gen-dh
      args:
        chdir: /etc/openvpn/certs
        creates: /etc/openvpn/certs/pki/dh.pem

    # Same pki/ path fix as above for the CRL.
    - name: Create CRL key
      shell: ./3/easyrsa gen-crl
      args:
        chdir: /etc/openvpn/certs
        creates: /etc/openvpn/certs/pki/crl.pem

    - name: Generate OpenVPN TLS key
      shell: openvpn --genkey --secret /etc/openvpn/certs/pki/ta.key
      args:
        creates: /etc/openvpn/certs/pki/ta.key

    # Writes server.conf verbatim. Fixed: the Sandbox route read
    # 129.168.20.0, breaking the 192.168.(4*env) pattern every other
    # environment follows — corrected to 192.168.20.0.
    - name: Setup server config
      shell:
        cmd: |
          cat >/etc/openvpn/server.conf <<EOF
          # https://github.com/OpenVPN/openvpn/blob/master/sample/sample-config-files/server.conf #
          port 1194
          proto udp
          dev tun
          ca certs/pki/ca.crt
          cert certs/pki/issued/server.crt
          key certs/pki/private/server.key
          dh certs/pki/dh.pem
          server 192.168.200.0 255.255.255.0
          ifconfig-pool-persist ipp.txt
          # Crabi Admin routes
          push "route 192.168.100.0 255.255.255.0"
          push "route 192.168.0.0 255.255.255.0"
          push "route 10.0.0.0 255.255.255.0"
          push "route 10.0.32.0 255.255.255.0"
          # Crabi Dev routes
          push "route 192.168.101.0 255.255.255.0"
          push "route 192.168.4.0 255.255.255.0"
          push "route 10.1.0.0 255.255.255.0"
          push "route 10.1.32.0 255.255.255.0"
          # Crabi QA routes
          push "route 192.168.102.0 255.255.255.0"
          push "route 192.168.8.0 255.255.255.0"
          push "route 10.2.0.0 255.255.255.0"
          push "route 10.2.32.0 255.255.255.0"
          # Crabi Preprod routes
          push "route 192.168.103.0 255.255.255.0"
          push "route 192.168.12.0 255.255.255.0"
          push "route 172.16.0.0 255.255.255.0"
          push "route 172.16.32.0 255.255.255.0"
          # Crabi Prod routes
          push "route 192.168.104.0 255.255.255.0"
          push "route 192.168.16.0 255.255.255.0"
          push "route 172.16.100.0 255.255.255.0"
          push "route 172.16.132.0 255.255.255.0"
          # Crabi Sandbox routes
          push "route 192.168.105.0 255.255.255.0"
          push "route 192.168.20.0 255.255.255.0"
          push "route 10.3.0.0 255.255.255.0"
          push "route 10.3.32.0 255.255.255.0"
          # DNS servers.
          push "dhcp-option DNS 208.67.222.222"
          push "dhcp-option DNS 208.67.220.220"
          keepalive 10 120
          tls-auth /etc/openvpn/certs/pki/ta.key 0
          cipher AES-256-CBC
          duplicate-cn
          persist-key
          persist-tun
          status /var/log/openvpn-status.log
          log-append /var/log/openvpn.log
          verb 3
          explicit-exit-notify 1
          remote-cert-eku "TLS Web Client Authentication"
          EOF
      args:
        creates: /etc/openvpn/server.conf
{
"variables": {
"account_file": "",
"project_id": "",
"region": "us-west2",
"zone": "us-west2-a"
},
"builders": [
{
"type": "googlecompute",
"account_file": "{{user `account_file`}}",
"project_id": "{{user `project_id`}}",
"source_image_family": "centos-7",
"machine_type": "n1-standard-1",
"region": "{{user `region`}}",
"zone": "{{user `zone`}}",
"image_name": "openvpn-server-{{timestamp}}",
"image_description": "OpenVPN server",
"disk_type": "pd-standard",
"disk_size": 100,
"ssh_username": "packer"
}
],
"provisioners": [
{
"type": "shell",
"inline": ["sudo yum install -y ansible"]
},
{
"type": "ansible-local",
"playbook_file": "ovpn.yaml"
},
{
"type": "ansible-local",
"playbook_file": "postconf.yaml"
}
]
}
# Configure ip forwarding, firewalld
# and sets up openvpn as system service
---
- hosts: all
  become: true
  tasks:
    - name: Configure IP forwarding
      sysctl:
        name: net.ipv4.ip_forward
        value: '1'

    # Persists the sysctl to a drop-in file and bounces the network
    # service so forwarding takes effect immediately.
    - name: Reload network service
      shell: |
        echo "net.ipv4.ip_forward = 1" > /etc/sysctl.d/10-ipv4_forward.conf
        sysctl --system
        systemctl restart network.service

    # NOTE(review): these firewalld rules are permanent-only; they apply
    # to the runtime config only after a firewalld reload/reboot (this
    # runs at image-cook time, so that is presumably intended — confirm).
    - name: Configure firewall port exception
      firewalld:
        port: 1194/udp
        permanent: true
        state: enabled

    - name: Configure firewall service
      firewalld:
        zone: trusted
        service: openvpn
        permanent: true
        state: enabled

    - name: Add masquerade
      firewalld:
        masquerade: true
        permanent: true
        state: enabled

    - name: Enable OpenVPN at system startup
      service:
        name: openvpn@server.service
        enabled: true
# This is my config script. It is plain config taken from https://pritunl.com/
# and all it does is get packages installed so I have to go on, login and create
# all my vpn setup and clients manually
# Register the MongoDB 4.0 yum repo (Pritunl stores its state in MongoDB).
sudo tee /etc/yum.repos.d/mongodb-org-4.0.repo << EOF
[mongodb-org-4.0]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/7/mongodb-org/4.0/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.0.asc
EOF
# Register the Pritunl stable repo for CentOS 7.
# NOTE(review): gpgcheck=1 with no gpgkey= line — the signing key is
# imported into the rpm keyring below instead; confirm yum accepts that.
sudo tee /etc/yum.repos.d/pritunl.repo << EOF
[pritunl]
name=Pritunl Repository
baseurl=https://repo.pritunl.com/stable/yum/centos/7/
gpgcheck=1
enabled=1
EOF
# Enable EPEL for dependencies of the packages below.
sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
# Fetch the Pritunl package-signing key and import it into rpm's keyring.
gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 7568D9BB55FF9E5287D586017AE645C0CF8E292A
gpg --armor --export 7568D9BB55FF9E5287D586017AE645C0CF8E292A > key.tmp; sudo rpm --import key.tmp; rm -f key.tmp
# Install, start and enable Pritunl + MongoDB at boot.
sudo yum -y install pritunl mongodb-org
sudo systemctl start mongod pritunl
sudo systemctl enable mongod pritunl
# This is my instance template module.
# Like I said, I use null resources to chain the provisioning
# of this module with any other resource I may want to create
resource "google_compute_instance_template" "gcp_instance_template" {
  # TF 0.12+ references are bare expressions; quoted "resource.name"
  # strings are deprecated and rejected by 0.13+.
  depends_on = [
    null_resource.provision_ready,
  ]

  name         = var.gcp_template_name
  project      = var.gcp_template_project
  machine_type = var.gcp_template_machine
  tags         = flatten([var.gcp_template_target_tags])

  disk {
    auto_delete  = false
    source_image = var.gcp_template_disk_image
    disk_name    = var.gcp_template_name
    disk_type    = var.gcp_template_disk_type
    disk_size_gb = var.gcp_template_disk_size
    type         = var.gcp_template_gce_disk
  }

  network_interface {
    subnetwork         = var.gcp_template_subnetwork
    subnetwork_project = var.gcp_template_subnetwork_project

    access_config {
      nat_ip = var.gcp_template_nat_ip
    }
  }

  service_account {
    scopes = var.gcp_template_sa_scopes
  }

  scheduling {
    preemptible       = var.gcp_template_preemptible
    automatic_restart = var.gcp_template_restart
  }

  # Instance templates are immutable: build the replacement before
  # destroying the old one so the IGM never references a missing template.
  lifecycle {
    create_before_destroy = true
  }
}
# Re-created whenever the upstream provision_ready signal changes,
# so downstream resources can chain off provisioning completion.
resource "null_resource" "provision_ready" {
  triggers = {
    provision_ready = var.provision_ready
  }
}
# Completion marker other modules can depend on once the instance
# template (and its provisioning gate) exist.
resource "null_resource" "template_complete" {
  # TF 0.12+ references are bare expressions; quoted "resource.name"
  # strings are deprecated and rejected by 0.13+.
  depends_on = [
    null_resource.provision_ready,
    google_compute_instance_template.gcp_instance_template,
  ]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment