@olafbuitelaar
Created April 29, 2019 10:52
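# ansible.cfg (file name assumed from standard Ansible conventions): disables host
# key checking and adds /vagrant/roles to the roles search path.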
[defaults]
host_key_checking = no
roles_path = /vagrant/roles:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes
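# inventory (referenced by ansible.inventory_path in the Vagrantfile below): a
# single-host inventory that runs everything against localhost.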
localhost ansible_connection=local
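# playbook-install-role.yml (referenced by the first ansible_local provisioner in the
# Vagrantfile below): installs git and checks out the gluster.infra role into
# /vagrant/roles.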
- hosts: localhost
  tasks:
    - name: "install git"
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - git
    - name: "check-out gluster.infra role"
      git:
        repo: https://github.com/olafbuitelaar/gluster-ansible-infra.git
        dest: /vagrant/roles/gluster.infra
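# playbook.yml (referenced by the second ansible_local provisioner): installs the VDO
# packages and applies the gluster.infra role, which creates the VDO volume, volume
# group, thin pools, thick and thin logical volumes and LV cache described by the
# variables below.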
- hosts: localhost
  vars:
    gluster_infra_vdo:
      # - { name: 'ans_vdo_1', device: '/dev/ans_vg/ans_thick1_vdo' }
      - { name: 'ans_vdo_2', device: '/dev/ans_vg/ans_thick2_vdo' }
    gluster_infra_volume_groups:
      - { vgname: 'ans_vg', pvname: '/dev/sdb' }
      - { vgname: 'ans_vg', pvname: '/dev/sdc' }
      - { vgname: 'ans_vg', pvname: '/dev/mapper/ans_vdo_2' }
    gluster_infra_thinpools:
      # - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool1', thinpoolsize: '1G', poolmetadatasize: '15M', pvs: '/dev/mapper/ans_vdo_1' }
      - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool2', thinpoolsize: '1G', poolmetadatasize: '15M', pvs: '/dev/sdb,/dev/sdc' }
      # - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool3', thinpoolsize: '1G', poolmetadatasize: '15M' }
      # - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool4', thinpoolsize: '1G', poolmetadatasize: '15M', opts: "--type raid1", pvs: '/dev/sdb,/dev/sdc' }
      # - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool5', thinpoolsize: '1G', poolmetadatasize: '15M', opts: "", pvs: '/dev/sdb,/dev/sdc', meta_opts: '--type raid1', meta_pvs: '/dev/sdd,/dev/sde' }
      - { vgname: 'ans_vg', thinpoolname: 'ans_thinpool2_vdo', thinpoolsize: '1G', poolmetadatasize: '15M', opts: "", pvs: '/dev/mapper/ans_vdo_2', meta_opts: '--type raid1', meta_pvs: '/dev/sdd,/dev/sde' }
    gluster_infra_lv_logicalvols:
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool1', lvname: 'ans_thinlv1', lvsize: '1G', pvs: '/dev/mapper/ans_vdo_1' }
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool2', lvname: 'ans_thinlv2', lvsize: '1G', pvs: '/dev/sdb,/dev/sdc' }
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool3', lvname: 'ans_thinlv3', lvsize: '1G' }
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool4', lvname: 'ans_thinlv4', lvsize: '1G', opts: "--type raid1", pvs: '/dev/sdb,/dev/sdc' }
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool4', lvname: 'ans_thinlv5', lvsize: '1G', pvs: '/dev/sdb,/dev/sdc', meta_opts: '--type raid1', meta_pvs: '/dev/sdd,/dev/sde' }
      # - { vgname: 'ans_vg', thinpool: 'ans_thinpool4', lvname: 'ans_thinlv6', lvsize: '100M', meta_size: '100M', meta_opts: '--type raid1', meta_pvs: '/dev/sdd,/dev/sde' }
      - { vgname: 'ans_vg', thinpool: 'ans_thinpool2_vdo', lvname: 'ans_thinlv7', lvsize: '100M', pvs: '/dev/mapper/ans_vdo_2' }
    gluster_infra_thick_lvs:
      # - { vgname: 'ans_vg', lvname: 'ans_thick1_vdo', size: '9G', skipfs: yes, pvs: '/dev/sdb' }
      - { vgname: 'ans_vg', lvname: 'ans_thick2', size: '1G' }
      # - { vgname: 'ans_vg', lvname: 'ans_thick3', size: '1G', skipfs: yes, opts: "--type raid1" }
      # - { vgname: 'ans_vg', lvname: 'ans_thick4', size: '1G', skipfs: yes, opts: "--type raid1", pvs: '/dev/sdb,/dev/sdc' }
      # - { vgname: 'ans_vg', lvname: 'ans_thick5', size: '1G', opts: "", pvs: '/dev/sdb,/dev/sdc' }
      - { vgname: 'ans_vg', lvname: 'ans_thick2_vdo', size: '9G', skipfs: yes, opts: "", pvs: '/dev/sdb,/dev/sdc' }
      - { vgname: 'ans_vg', lvname: 'ans_thick6', size: '1G', opts: "", pvs: '/dev/mapper/ans_vdo_2' }
    gluster_infra_cache_vars:
      # - { vgname: 'ans_vg', cachedisk: '/dev/sde1', cachethinpoolname: 'ans_thinpool1', cachelvname: 'cache-ans_thinpool1', cachelvsize: '10M', cachemetalvname: 'cachemeta-ans_thinpool1', cachemetalvsize: '100M', cachemode: 'writethrough' }
      - { vgname: 'ans_vg', cachedisk: '/dev/sdc,/dev/sdd', cachethinpoolname: 'ans_thinpool2', cachelvname: 'cache-ans_thinpool2', cachelvsize: '10M', cachemetalvsize: '100M', meta_opts: '--type raid1', meta_pvs: '/dev/sdc,/dev/sdd', cachemode: 'writethrough' }
    gluster_infra_mount_devices:
      # - { path: '/gfs/brick1', vgname: 'ans_vg', lvname: 'ans_vg_thinlv' }
      # - { path: '/mnt/test', vgname: 'ans_vg_vdo', lvname: 'ans_vg_thinlv4' }
  pre_tasks:
    - name: "install vdo packages"
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - PyYAML
          - kmod-kvdo
          - vdo
  roles:
    - { role: gluster.infra }
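# Vagrantfile: brings up a CentOS 7 VirtualBox VM with two extra data disks and two
# SSD-flagged disks, then runs the two playbooks above through the ansible_local
# provisioner.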
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
config.vm.box = "centos/7"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# Create a public network, which generally maps to a bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
dataDisk1 = './dataDisk1.vdi'
dataDisk2 = './dataDisk2.vdi'
ssdDisk1 = './ssdDisk1.vdi'
ssdDisk2 = './ssdDisk2.vdi'
# $x=""
# $createController = true
# config.trigger.before :up, type: :hook do |t|
# t.info = "More information with ruby magic"
# t.ruby = proc{|env,machine|
# puts $createController
# $x=`VBoxManage showvminfo #{machine.id} --machinereadable`
# $createController = !/storagecontrollername\d+="SATA Controller"/.match?($x)
# puts $createController
# config.vm.provider "virtualbox" do |vb|
# if $createController
# vb.customize ['storagectl', :id, '--name', 'SATA Controller', '--add', 'sata', '--portcount', 4]
# end
# end
# }
# end
# config.trigger.before :up,
# info: "More information with ruby magic",
# ruby: proc{|env,machine| puts $createController }
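# The SATA Controller can only be added to the VM once: the trigger below writes a
# ./hasStorageController marker file after the first `vagrant up`, and the provider
# block checks for that marker before trying to add the controller again (the
# commented trigger above is an alternative that inspects `VBoxManage showvminfo`).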
config.trigger.after :up,
  info: "Write marker file",
  ruby: proc { |env, machine| File.open("./hasStorageController", "w") {} }
config.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
vb.memory = "2048"
if not File.exists?(dataDisk1)
vb.customize ['createmedium', '--filename', dataDisk1, '--size', 80 * 1024]
end
if not File.exists?(dataDisk2)
vb.customize ['createmedium', '--filename', dataDisk2, '--size', 80 * 1024]
end
if not File.exists?(ssdDisk1)
vb.customize ['createmedium', '--filename', ssdDisk1, '--size', 10 * 1024]
end
if not File.exists?(ssdDisk2)
vb.customize ['createmedium', '--filename', ssdDisk2, '--size', 10 * 1024]
end
if not File.exists?("./hasStorageController")
vb.customize ['storagectl', :id, '--name', 'SATA Controller', '--add', 'sata', '--portcount', 4]
#File.open("./hasStorageController", "w") {}
end
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', dataDisk1]
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 2, '--device', 0, '--type', 'hdd', '--medium', dataDisk2]
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 3, '--device', 0, '--type', 'hdd', '--medium', ssdDisk1, '--nonrotational','on']
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 4, '--device', 0, '--type', 'hdd', '--medium', ssdDisk2, '--nonrotational','on']
end
#
# View the documentation for the provider you are using for more
# information on available options.
# Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
# such as FTP and Heroku are also available. See the documentation at
# https://docs.vagrantup.com/v2/push/atlas.html for more information.
# config.push.define "atlas" do |push|
# push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
# end
# Enable provisioning with a shell script. Additional provisioners such as
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
# documentation for more information about their specific syntax and use.
# config.vm.provision "shell", inline: <<-SHELL
# apt-get update
# apt-get install -y apache2
# SHELL
config.vm.provision "ansible_local" do |ansible|
ansible.become = true
ansible.inventory_path = "inventory"
ansible.playbook = "playbook-install-role.yml"
ansible.limit = "all"
end
config.vm.provision "ansible_local" do |ansible|
ansible.become = true
ansible.inventory_path = "inventory"
ansible.playbook = "playbook.yml"
ansible.limit = "all"
end
end
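# Usage (assumed layout): keep ansible.cfg, inventory, both playbooks and this
# Vagrantfile in the same project directory and run `vagrant up`; Vagrant creates and
# attaches the extra disks, then the ansible_local provisioner installs the
# gluster.infra role and applies playbook.yml inside the guest.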