@weldpua2008
Last active May 19, 2021 13:59
Ansible complex examples

Ansible Vars Operations

Useful VARs

  • ansible_processor_vcpus
  • hostvars[inventory_hostname]
  • inventory_hostname
  • ansible_os_family
  • ansible_distribution
  • ansible_distribution_major_version
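These facts are handy in conditionals and templates. A minimal illustration (task names and the vCPU threshold are arbitrary):

- name: Show a few useful facts for this host
  debug:
    msg: "{{ inventory_hostname }} runs {{ ansible_distribution }} {{ ansible_distribution_major_version }} ({{ ansible_os_family }}) with {{ ansible_processor_vcpus }} vCPUs"

- name: Branch on a fact
  set_fact:
    nginx_worker_processes: "{{ ansible_processor_vcpus }}"
  when: ansible_processor_vcpus | int >= 4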

Create dict based on dict

---
# run as: ansible-playbook -i localhost, -c local playbook.yml
- hosts: all
  gather_facts: false
  vars:
    RedisInstances:
        - { name: 'users' }
        - { name: 'proxy' }
    RedisPacemakerDefault:
        type: "ocf:heartbeat:redis"
        masterslave: true
        op:
          - action: "monitor"
            options:
              role: "Master"
              timeout: 60s
              interval: 10s
          - action: "op monitor"
            options:
              role: "Slave"
              timeout: 60s
              interval: 20s
    _pacemaker_resources: |
        {% set merged=[] %}
        {%- for redisInstance in RedisInstances -%}
        {%- if "name" in redisInstance -%}
        {% set _ = merged.append(  {'id': redisInstance['name'] }|combine(RedisPacemakerDefault) ) %}
        {% endif %}
        {% endfor %}{{merged}}
    pacemaker_resources: "{{_pacemaker_resources}}"
    # - id: users
    #   type: "ocf:heartbeat:redis"
    #   op:
    #   - action: "monitor"
    #    options:
    #      role: "Master"
    #      timeout: 60s
    #      interval: 10s
    #  - action: "op monitor"
    #    options:
    #      role: "Slave"
    #      timeout: 60s
    #      interval: 20s
  tasks:
    - name: Debug var
      shell: echo {{item}}
      with_items: "{{pacemaker_resources|trim}}"
    - name: Display pacemaker_resources
      debug:
        var: pacemaker_resources

Find the public interface of the Ansible control host

- name: set ansible control host public interfaces fact
  local_action:
    module: set_fact
    ansible_control_host_interface: |
      {%- for interface in ansible_interfaces| select('match', '^(eth|bond)[0-9]+') | list  -%}
      {% set _int = 'ansible_%s' | format(interface) %}
      {%- if _int in hostvars[inventory_hostname] and 'ipv4' in hostvars[inventory_hostname][_int] and hostvars[inventory_hostname][_int]['ipv4']['address'] is defined and hostvars[inventory_hostname][_int]['ipv4']['address'] | ipaddr('public') -%}
      {{ _int|trim}}{% break %}
      {% endif %}
      {% endfor %}
  when: ansible_control_host_address is not defined
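Note: {% break %} only works when the jinja2.ext.loopcontrols extension is enabled for templating. A filter-only sketch (untested; it picks the first public IPv4 address rather than the interface name, using the same facts as above):

- name: set ansible control host public address fact (filter chain)
  local_action:
    module: set_fact
    ansible_control_host_address: >-
      {{ ansible_interfaces
         | select('match', '^(eth|bond)[0-9]+')
         | map('regex_replace', '^', 'ansible_')
         | map('extract', hostvars[inventory_hostname])
         | selectattr('ipv4', 'defined')
         | map(attribute='ipv4.address')
         | ipaddr('public')
         | first | default('') }}
  when: ansible_control_host_address is not defined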

Template Operations

Generate NGINX Upstreams IPs from Ansible Group

Generates the upstream IPs by resolving each backend's serverDomain via getent, falling back to the serverIp var from the inventory. template.conf.j2:

upstream http_areq {
    server 127.0.0.1:3000;
{% if 'BACKEND_SERVERS' in groups %}
{% for node in groups['BACKEND_SERVERS'] | difference([inventory_hostname])|shuffle %}
    {% set resolved_ip = lookup('pipe','getent hosts ' +  hostvars[node].serverDomain |quote + '|awk \'{ print $1 }\' '  ) %}
    {% if  resolved_ip == '' %}
    {% set resolved_ip =  hostvars[node].serverIp %}
    {% endif %}
    server {{ resolved_ip }}:80 backup;
{% endfor %}
{% endif %}
    keepalive 50;
}
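A task to render this template might look like the following (destination path, ownership, and handler name are assumptions):

- name: Render upstream configuration
  template:
    src: template.conf.j2
    dest: /etc/nginx/conf.d/upstream_http_areq.conf
    owner: nginx
    group: nginx
  notify: reload nginx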

Generate List from hosts

{% set merged = [] + ANSIBLE_GROUP_1|default([]) + ANSIBLE_GROUP_2|default([]) %}
HOSTS=( {% for node in ANSIBLE_GROUP_SOME|default(merged) %}{{node}} {% endfor %} )

Using loop.first

{% for node in groups['SERVERS']|default([]) %}
{% if loop.first %}MEMSQL_HOST={{node}}{% endif %}{% endfor %}

Control commas with the Jinja joiner

# will render a comma-separated list without a leading comma
# (joiner() returns '' on its first call and ',' on every later call)
{% set comma = joiner(",") %}
[
{%- for var in some_list -%}
{{ comma() }}{{ var }}
{%- endfor %}
]

Working with macros (functions)

{% macro map(group, node, TUN_RESERVEDIPS, INTERNAL_RESERVEDIPS) %}
{% set _tun_prefix = [] %}
{% if group in ['EXTERNAL_SERVERS'] %}
{% set _tun_prefix = [1000] %}
{% elif group in ['EXTERNAL_INTERNAL_SERVERS'] %}
{% set _tun_prefix = [2000] %}
{% elif group in ['EXTERNAL_AWS_SERVERS'] %}
{% set _tun_prefix = [3000] %}
{% endif %}
{% set _privateip_added = [false] %}
{% set _privateip = [] %}{% set _tunsufix = node | regex_findall('(?:[0-9]{1,3})') | join('')|int  %}{% set _tunnum = _tun_prefix[0]|int + _tunsufix|int %}{% set _ext_ip_indx = _tunnum|int  % 2  %}
    - tun:
        "fw_tun_name":  "tun{{ _tunnum }}"
        "fw_vip_ip":  "{{ EXTERNAL_EXT_IPS[_ext_ip_indx|int]}}"
        "fw_tungroup":  "tun-gr-{{ EXTERNAL_EXT_VIP[_ext_ip_indx|int]}}"
        "fw_vip_ext":  "{{ EXTERNAL_EXT_VIP[_ext_ip_indx|int]}}"   
        "fw_vip_int":  "{{ EXTERNAL_INT_VIP[_ext_ip_indx|int]}}"   
        "remote_server_tun":  "{{ EXTERNAL_SERVER_TUN_NAME[_ext_ip_indx]}}"
{% for interface in hostvars[node]["ansible_interfaces"]|default([]) %}{% if interface not in ['docker', 'lo'] %}{% set _int = 'ansible_%s' | format(interface) %}{% if _int in hostvars[node] and 'ipv4' in  hostvars[node][_int] and  hostvars[node][_int]['ipv4']['address'] is defined %}
{% for reserved_net in TUN_RESERVEDIPS|default([]) %}
{% if  hostvars[node][_int]['ipv4']['address'] | ipaddr('private') and not hostvars[node][_int]['ipv4']['address'] | ipaddr(reserved_net)  %}        - { "remote_server_private_ip": "{{hostvars[node][_int]['ipv4']['address']}}" }
{% set _ = _privateip_added.append(true) %}{% endif %}{% endfor %}
{% if  hostvars[node][_int]['ipv4']['address'] | ipaddr('public') %}        "remote_server_public_ip": "{{hostvars[node][_int]['ipv4']['address']}}"
{% endif %}{% endif %}{% endif %}{% endfor %}
{% if not _privateip_added[-1] %}{% set t = INTERNAL_START_IP[-1]|int+1 %}{% set _ = INTERNAL_START_IP.append(t) %}
        "remote_server_private": "{{INTERNAL_NETWORK[0]}}.{{INTERNAL_START_IP[-1]}}"
{% endif %}
{% if 'ansible_hostname' in hostvars[node] %}
        "remote_server_node": "{{hostvars[node]['ansible_hostname']}}.cedato.local"
{% else %}        
        "remote_server_node": "{{node}}"
{% endif %}
        "remote_server_hostname": "{{node}}"
        "ansible_group": "{{group}}"
{% endmacro %}
{% for group in ['EXTERNAL_SERVERS', 'EXTERNAL_INTERNAL_SERVERS', 'EXTERNAL_AWS_SERVERS'] %}
{% for node in groups[group]|default([]) %}
{{ map(group, node) }}{% endfor %}{% endfor %}

Generate Allowed Hosts for X-Real-IP

template.conf.j2:

real_ip_header X-Real-IP;
real_ip_recursive on;
set_real_ip_from  127.0.0.1;
{% if 'LOAD_BALANCER_SERVERS' in groups %}
{% for node in groups['LOAD_BALANCER_SERVERS']| difference([inventory_hostname])  %}
# {{node}}
{% set resolved_ip = lookup('pipe','getent hosts ' +  hostvars[node].serverDomain |quote + '|awk \'{ print $1 }\' '  ) %}
{% if  resolved_ip != '' and resolved_ip | ipaddr('public') %}
set_real_ip_from {{ resolved_ip }};
{% endif %}
{% set resolved_ip_local = lookup('pipe','getent hosts ' +  node |quote + '|awk \'{ print $1 }\' '  ) %}
{% if  resolved_ip_local != '' and resolved_ip_local | ipaddr('private') %}
set_real_ip_from {{ resolved_ip_local }};
{% endif %}
{% endfor %}
{% endif %}

Using the pipe lookup in a template to compute md5sums

#!/usr/bin/env bash
################################
# Checking the GEO-DB
################################
exit_code=0
{% for db_file in  maxmind_databases_hadoop %}
if [[ $(md5sum "/opt/cloudera/maxmind/{{db_file}}"|cut -d ' ' -f 1) != "{{ lookup('pipe', 'md5sum /etc/Ansible/cedato/roles/geo-db-maxmind/files/'+ db_file +'|cut -d " " -f 1' ) }}" ]];then
	echo "/opt/cloudera/maxmind/{{db_file}} check failed"
	exit_code=$((exit_code+1))
fi
{% endfor %}

exit $exit_code
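One way to deploy and run this check script (template and destination paths are assumptions):

- name: Render the GEO-DB check script
  template:
    src: check_geo_db.sh.j2
    dest: /usr/local/bin/check_geo_db.sh
    mode: 0755

- name: Verify the GEO-DB files
  command: /usr/local/bin/check_geo_db.sh
  changed_when: false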

Tasks Operations

Faster folder creation (only if missing)

Create folders (owner=nginx, group=nginx) from the following vars:

APP_FOLDER
APP_DATA_FOLDER
- name: Check if path exists
  stat: path={{ item }} get_checksum=no get_md5=no get_attributes=no get_mime=no
  with_items: "{{ APP_FOLDER|default([]) + APP_DATA_FOLDER|default([]) }}"
  register: paths

- name: Create folder
  file: path={{ item.item }} state=directory owner=nginx group=nginx
  with_items: "{{ paths.results }}"
  when: not item.stat.exists

Git checkout with rollback

Uses the following vars: git_branch - branch, tag, or commit to check out; git_repo - git repository URL; git_forcepull - whether to force the git pull.

- name: Get previous version
  shell: "if [[ -e '/code/git/.git' ]];then cd /code/git; git describe --exact-match --tags $(git log -n1 --pretty='%h') 2> /dev/null || git rev-parse HEAD ; fi"
  register: _previous_version
  ignore_errors: yes

- name: set facts
  set_fact:
    git_prev_ver: "{{_previous_version.stdout|default('failed')}}"

- block:
    - name: Get updated files from git repository ({{git_repo}})
      git: repo={{git_repo}}  dest=/code/git version={{git_branch|default('master')}} force={{git_forcepull|default('yes')}}
      register: gitresult
    - debug: msg="SHA-1 before git update is {{ gitresult.before }}"
    - debug: msg="SHA-1 after git update is {{ gitresult.after }}"

  rescue:
    - name: Rollback files from git repository ({{git_repo}}) to Commit-ID {{ gitresult.before| default('undefined') }}
      git: repo={{git_repo}} dest=/code/git version={{ gitresult.before }} force={{git_forcepull|default('yes')}}
      when: gitresult.before| default('undefined') != 'undefined'

    - name: Rollback git repository ({{git_repo}}) to Commit-ID {{ git_prev_ver| default('undefined') }} (OLD)
      git: repo={{git_repo}} dest=/code/git version={{ git_prev_ver }} force={{git_forcepull|default('yes')}}
      when: gitresult.before| default('undefined') == 'undefined' and git_prev_ver| default('undefined') != 'undefined'
      
    - name: Always fail so the play reports the rollback
      command: /bin/false

Using with_nested

#openresty_instances:
#  - name: "stage"
#    worker_processes: 1
#    server_names_hash_max_size: 512        
#    nginx_http_end_options: |
#        proxy_temp_path /opt/volume1/nginx_tmp;
#        client_body_temp_path  /opt/volume1/nginx_tmp;
#  - name: "production"
#    fastcgi_env: "prod"
#    worker_processes: 2
#    server_names_hash_max_size: 512        
#    nginx_http_end_options: |
#        proxy_temp_path /opt/volume1/nginx_tmp;
#        client_body_temp_path  /opt/volume1/nginx_tmp;
- name: Creating env folders under conf.d/
  file:
    path: "{{openresty_configuration_temp}}/{{ item[1]}}{{ item[0].name }}.d"
    state: directory
    mode: 0755
  with_nested: 
    - "{{openresty_instances}}"
    - ["conf.d/domains.", "conf.d/locations.", "conf.d/upstreams.","includes."]

Using with_filetree

- name: Building dynamic configuration
  template:
    src: "{{ item.src }}"    
    dest: "{{openresty_configuration_temp }}/{{ item.path | regex_replace('\\.j2','') }}"
    owner: nginx
    group: nginx
  with_filetree:
      - ../templates/nginx/

Distribute ssh key to user web

- name: Create group 'web'
  group:
    name: web
    state: present

- name: Create user 'web' (for rocketeer deployments)
  user:
    name: web
    comment: "Web User"
    group: web
    shell: /bin/bash
    home: /home/web
    password: "{{ lookup('pipe', 'date +%Y%m%d-%H%M')|password_hash('sha512') }}"
    update_password: on_create
    state: present
- name: Add the user 'web' appending the group 'nginx' and 'web'
  user:
    name: web    
    groups: nginx,web
    append: yes    
- name: Create a 2048-bit SSH key for user 'web' in ~/.ssh/id_rsa
  user:
    name: web       
    generate_ssh_key: yes
    ssh_key_bits: 2048
    ssh_key_file: .ssh/id_rsa

- name: Slurp ssh key file for user 'web'
  slurp:
    src: /home/web/.ssh/id_rsa.pub
  register: sshpubkey_slurpfile
- name: Copy public ssh key to authorized_keys for the loopback connection
  authorized_key: user=web key="{{ sshpubkey_slurpfile['content'] | b64decode }}" 

Update XDebug configuration

- name: insert/update XDebug configuration
  blockinfile:
    path: /etc/php.ini
    marker: "; {mark} ANSIBLE MANAGED BLOCK"
    state: present
    block: |
      [Xdebug]
      zend_extension="/opt/remi/php56/root/usr/lib64/php/modules/xdebug.so"
      xdebug.profiler_output_dir = "/tmp/xdebug/"
      xdebug.profiler_enable = On
      xdebug.remote_enable=On
      xdebug.remote_host="localhost"
      xdebug.remote_port=9000
      xdebug.remote_handler="dbgp"
      xdebug.remote_autostart=On
    # validate: php -e -c /etc/php.ini  -r 'echo "OK\n";';
  ignore_errors: yes

Update Jenkins Port

- name: Set the Jenkins JENKINS_PORT
  lineinfile:
    dest: "{{ jenkins_init_file }}"
    regexp: '^JENKINS_PORT=.*'
    line: 'JENKINS_PORT={{ JENKINS_PORT| default("8080") }}'
  notify: restart jenkins  
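The notify above assumes a handler along these lines exists in the role:

# handlers/main.yml (assumed)
- name: restart jenkins
  service:
    name: jenkins
    state: restarted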

Find by pattern

- name: Find openresty startup scripts in /etc/init.d
  find:
    paths: /etc/init.d
    patterns: 'openresty.*'
  register: mi_status
- name: Make sure OpenResty is stopped
  service: name=openresty state=stopped
  when: "mi_status.matched|default(0) < 1"

Detect if service is running

CentOS-based OS:
- name: Check if OpenResty is running...
  shell: service openresty status warn=false
  register: openresty_service
  failed_when: openresty_service.rc != 0 and ("unrecognized service" not in openresty_service.stderr)
  ignore_errors: true
- name: Make sure OpenResty service is stopped
  service: name=openresty state=stopped
  when: "openresty_service.rc == 0"

Restart service

    - name: ReStart Service due Rollback
      service: name="{{service_name}}" state=restarted enabled=true sleep=5
      register: start_result
      until: "start_result.state is defined and start_result.state == 'started'"
      retries: 3
      delay: 5   

Fixing /dev/shm size

- name: Fixing /dev/shm size
  shell: mount -o remount,size=12G /dev/shm
  when: "(ansible_memtotal_mb*0.5)|int|abs < 12000"

- name: Fixing /dev/shm size
  shell: mount -o remount,size={{ (ansible_memtotal_mb/512)|int|abs }}G /dev/shm
  when: "(ansible_memtotal_mb*0.5)|int|abs >= 12000"

Fixing MySQL permissions

- name: Fixing PRIVILEGES
  shell: mysql -e "UPDATE mysql.user SET Super_Priv='Y' WHERE user='user' AND host='%';FLUSH PRIVILEGES;"
  ignore_errors: yes

Cache from Artifactory locally

- name: Download from Artifactory locally
  local_action:
    module: get_url
    url: "{{ artifact_url }}"
    dest: "{{download_path}}"
    headers: X-JFrog-Art-Api:{{ artifactory_api_key }}
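After caching on the control machine, a follow-up task can push the artifact to the managed host (remote_artifact_path is an assumed variable):

- name: Push cached artifact to the remote host
  copy:
    src: "{{ download_path }}"
    dest: "{{ remote_artifact_path | default('/opt/artifacts/') }}"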

Check Jenkins Started

- block:
    - name: Wait for Jenkins to start up
      uri:
        url: "{{my_jenkins_url}}"
        status_code: 200
        timeout: 5
        user: "{{JENKINS_USERNAME}}"
        password: "{{JENKINS_USERNAME_PASSWORD}}"
        force_basic_auth: yes
      register: jenkins_service_status  
      retries: 60
      delay: 5
      until: >
         'status' in jenkins_service_status and
         (jenkins_service_status['status'] == 200 or jenkins_service_status['status'] == 403)  
      when: jenkins_restart_required
  rescue:
    - name: Wait for Jenkins to start up
      uri:
        url: "{{my_jenkins_url}}"
        status_code: 200
        timeout: 5
      register: jenkins_service_status
      # ignore_errors: true
      # Keep trying for 5 mins in 5 sec intervals
      retries: 60
      delay: 5
      until: >
         'status' in jenkins_service_status and
         jenkins_service_status['status'] == 200
      when: jenkins_restart_required

Create swap

- name: Create swapfile folder {{ swapfile_location | dirname }}
  file:  path="{{ swapfile_location | dirname }}" state=directory

- name: swap - remove current swaps from fstab
  lineinfile:
    dest: /etc/fstab
    regexp: '^/mnt[\S]+\s+none\s+swap '
    state: absent
  when: recreate_swaps | bool
  
- name: swap - disable swap
  shell: for swap_mp in $(awk '/^\/mnt/ {print $1}' /proc/swaps|grep -v "{{ swapfile_location }}");do swapoff $swap_mp;done
  ignore_errors: yes
  when: recreate_swaps | bool

- name: Write swapfile {{ swapfile_location }}
  shell: |
    {% if swapfile_use_dd %}
    dd if=/dev/zero of={{ swapfile_location }} bs=1M count={{ swapfile_size_gb| int * 1024 }}
    {% else %}
    fallocate -l {{ swapfile_size_gb }}G {{ swapfile_location }}
    {% endif %}
  args:
    creates: "{{ swapfile_location }}"
  register: write_swapfile
  when: recreate_swaps | bool
  tags: ['swap_create_file']

- name: Set swapfile permissions {{ swapfile_location }}
  file: path={{ swapfile_location }} mode=600  
  tags: ['swap_create_file']

- name: Create swapfile {{ swapfile_location }}
  command: mkswap {{ swapfile_location }}
  environment:
    PATH: "{{ (ansible_env|default({})).PATH|default('') }}:/usr/local/sbin:/usr/sbin:/sbin"
  register: create_swapfile
  when: write_swapfile.changed|default(false)
  tags: ['swap_create_file']

- name: Enable swapfile {{ swapfile_location }}
  command: swapon {{ swapfile_location }}
  environment:
    PATH: "{{ (ansible_env|default({})).PATH|default('') }}:/usr/local/sbin:/usr/sbin:/sbin"
  when: write_swapfile.changed|default(false)
  tags: ['swap_create_file']

- name: Add swapfile {{ swapfile_location }} to /etc/fstab
  lineinfile: dest=/etc/fstab line="{{ swapfile_location }}   none    swap    sw    0   0" state=present
  tags: ['swap_create_file']

- name: Enable all
  command: swapon -a
  environment:
    PATH: "{{ (ansible_env|default({})).PATH|default('') }}:/usr/local/sbin:/usr/sbin:/sbin"
  ignore_errors: yes

Kernel Sysctl

---
- name: Create folder
  file: path=/etc/sysctl.d state=directory
  ignore_errors: yes
  tags: ['sysctl.d-create']

- set_fact:
    net_core_somaxconn: "65535"
  when: net_core_somaxconn is not defined and (ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' or ansible_distribution == 'RedHat') and ansible_distribution_major_version == "7" 

- name: Optimizing setting under sysctl
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  with_items:
    - { key: 'vm.overcommit_memory', value: '1' }
    - { key: 'fs.file-max', value: '13122664' }
    - { key: 'net.ipv4.tcp_sack', value: '1' }
    - { key: 'net.ipv4.tcp_window_scaling', value: '1' }
    - { key: 'net.ipv4.tcp_congestion_control', value: 'cubic' }
    - { key: 'net.ipv4.tcp_syncookies', value: '1' }
    - { key: 'net.ipv4.ip_nonlocal_bind', value: '1' }
    - { key: 'net.core.netdev_max_backlog', value: '200000' }
    - { key: 'net.core.rmem_max', value: '11586816' }
    - { key: 'net.core.somaxconn', value: "{{ net_core_somaxconn| default('324000')}}" }
    - { key: 'net.core.wmem_max', value: '11586816' }
    - { key: 'net.ipv4.ip_local_port_range', value: '20000 65535' }
    - { key: 'net.ipv4.tcp_fin_timeout', value: '30' }
    - { key: 'net.ipv4.tcp_keepalive_time', value: '300' }
    - { key: 'net.ipv4.tcp_max_orphans', value: '2621440' }
    - { key: 'net.ipv4.tcp_max_syn_backlog', value: '200000' }
    - { key: 'net.ipv4.tcp_max_tw_buckets', value: '72000000' }
    - { key: 'net.ipv4.tcp_rmem', value: '5793408 7724544 11586816' }
    - { key: 'net.ipv4.tcp_slow_start_after_idle', value: '0' }
    - { key: 'net.ipv4.tcp_wmem', value: '5793408 7724544 11586816' }
# error: "net.netfilter.nf_conntrack_tcp_timeout_established" is an unknown key
# https://vultrcoupons.com/2015/06/17/centos-6-5-optimization-of-some-basic-optimization-and-security-settings/
# CentOS 6.x workaround:
#   modprobe nf_conntrack
#   echo "modprobe nf_conntrack" >> /etc/rc.local
# Note: the net.netfilter.* sysctl keys only exist once the nf_conntrack module
# is loaded (e.g. by iptables/ip6tables). If you do not use iptables, drop the
# nf_conntrack settings from the kernel optimization, otherwise sysctl keeps
# reporting the "unknown key" error above.
- name: Load conntrack
  shell: modprobe ip_conntrack
  ignore_errors: yes
  when: "firewall_v4_default_rules is defined and (nf_conntrack_enable is defined and nf_conntrack_enable | bool == True)"

- name: Optimizing setting under sysctl
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  with_items:
    - { key: 'net.netfilter.nf_conntrack_tcp_timeout_established', value: '86400' }
    - { key: 'net.netfilter.nf_conntrack_generic_timeout', value: '120' }
    - { key: 'net.netfilter.nf_conntrack_max', value: '10024000' }
  when: "firewall_v4_default_rules is defined and (nf_conntrack_enable is defined and nf_conntrack_enable | bool == True)"

- name: Optimizing setting under sysctl for Centos 6
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  when: (ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' or ansible_distribution == 'RedHat') and ansible_distribution_major_version == "6"  
  with_items:
    - { key: 'net.ipv4.tcp_tw_recycle', value: '1' }
    - { key: 'net.ipv4.tcp_tw_reuse', value: '1' }
    - { key: 'net.ipv4.tcp_timestamps', value: '1' }

- name: Optimizing setting under sysctl for Centos 7
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  when: (ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' or ansible_distribution == 'RedHat') and ansible_distribution_major_version == "7"
  with_items:
    - { key: 'net.ipv4.tcp_tw_recycle', value: '0' }
    - { key: 'net.ipv4.tcp_tw_reuse', value: '0' }
    - { key: 'net.ipv4.tcp_timestamps', value: '0' }

- name: Optimizing setting under sysctl
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  when: "'HADOOP' not in group_names and 'LOGSTASH' in group_names"
  with_items:    
    - { key: 'vm.swappiness', value: '0' }

- name: Optimizing setting under sysctl
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  when: "'HADOOP' in group_names or 'LOGSTASH' in group_names "
  with_items:    
    - { key: 'vm.swappiness', value: '1' }

- name: Optimizing swappiness setting under sysctl (HADOOP_SERVERS)
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  when: "'HADOOP' in group_names  "
  with_items:    
    - { key: 'vm.swappiness', value: '1' }

- name: Optimizing entropy settings under sysctl
  sysctl: name={{ item.key }}  value={{ item.value }} sysctl_set=yes state=present reload=yes sysctl_file=/etc/sysctl.d/10-optimization.conf
  with_items: 
    - { key: 'kernel.random.read_wakeup_threshold', value: '1024' }
    - { key: 'kernel.random.write_wakeup_threshold', value: '2048' }
  tags: ['sysctl_entropy']

DigitalOcean

Ensure SSH KEY of Ansible Control Machine exists at DigitalOcean

Requires the following variables:

  do_token - your DigitalOcean API token

  - name: Ensure SSH KEY was created on Ansible Control Machine
    local_action: 
      module: user 
      name: "{{ lookup('pipe', 'id -un') | d(lookup('pipe', 'whoami'), True) | d(lookup('env', 'USER'), True) |  d(lookup('env', 'user'), True) |  d(lookup('env', 'LOGNAME'), True) }}"
      generate_ssh_key: yes
      ssh_key_file: .ssh/id_rsa

  - name: Ensure SSH KEY of Ansible Control Machine exists at DigitalOcean
    local_action:
      module: digital_ocean 
      state: present
      command: ssh
      name: "{{ lookup('pipe', 'hostname|tr . _') }}__{{env |default('dev')}}_{{ lookup('pipe', 'id -un') | d(lookup('pipe', 'whoami'), True) | d(lookup('env', 'USER'), True) |  d(lookup('env', 'user'), True) |  d(lookup('env', 'LOGNAME'), True) }}_ssh_key"
      ssh_pub_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
      api_token: "{{ do_token }}"
    register: my_ssh_key
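The registered result can then be reused when creating a droplet. A sketch with the legacy digital_ocean module, assuming the registered result exposes ssh_key.id (name, image, region, and size are placeholders):

  - name: Create a droplet using the registered SSH key
    local_action:
      module: digital_ocean
      state: present
      command: droplet
      name: "example-droplet"
      unique_name: yes
      size_id: s-1vcpu-1gb
      region_id: ams3
      image_id: centos-7-x64
      ssh_key_ids: ["{{ my_ssh_key.ssh_key.id }}"]
      api_token: "{{ do_token }}"
    register: my_droplet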
    

Hosts Operations

Generate a comma-separated host list from the inventory

ANSIBLE_PLAYBOOK=""
case "${ENV,,}" in
	stage*)
		ANSIBLE_INVENTORY="/etc/Ansible/environments/staging/hosts"
    
	;;
	prod*)
		ANSIBLE_INVENTORY="/etc/Ansible/environments/production/hosts"
	;;
	*)
		echo "Wrong \$ENV"
		exit 1
	;;
esac

if [[ "${ANSIBLE_INVENTORY}" = "" ]] || [[ ! -e "${ANSIBLE_INVENTORY}" ]];then
	echo "Can't detect ansible inventory"
	exit 1
fi
LIST=$(ansible -i "${ANSIBLE_INVENTORY}" "${HOSTS}" --list-hosts | tail  -n +2| awk -v HOST=$UPDATESTRING '{ HOST=HOST","$1 } END {print substr(HOST,2); }')
ansible -i "${ANSIBLE_INVENTORY}" "${ANSIBLE_PLAYBOOK}" -e 'var_ansible_hosts="'$(echo "$LIST")'"' 