# A sample gdeploy configuration file to set up a ROBO (remote office/branch office) deployment, based on https://github.com/gluster/gdeploy/blob/master/examples/hc.conf
[hosts]
host1
host2
host3
# Do a sanity check before proceeding
[script1]
action=execute
file=/usr/share/ansible/gdeploy/scripts/grafton-sanity-check.sh
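# Install the oVirt 4.1 release repository package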
[yum0]
action=install
packages=http://resources.ovirt.org/pub/yum-repo/ovirt-release41.rpm
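# Install the virtualization and hosted-engine packages, then update the hosts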
[yum1]
action=install
gpgcheck=yes
packages=vdsm,vdsm-gluster,ovirt-hosted-engine-setup,screen
update=yes
# Set up NTP on the servers before any other operations are done
# Disable the existing public NTP servers
[shell1]
action=execute
command=sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf
# Add custom NTP server
[update-file1]
action=add
dest=/etc/ntp.conf
line=server clock.redhat.com iburst
[service1]
action=enable
service=ntpd
[service2]
action=restart
service=ntpd
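# Configure vdsm and its dependent services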
[shell2]
action=execute
command=vdsm-tool configure --force
# Disable multipath
[script2]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-multipath.sh
# Edit to match your environment, below assumes a single
# device for gluster at /dev/vdb
[pv]
action=create
devices=vdb
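# Create a volume group for the gluster bricks on the device above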
[vg1]
action=create
vgname=gluster_vg1
pvname=vdb
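# Thick LV for the hosted-engine storage domain brick, mounted at /gluster/brick1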
[lv1]
action=create
vgname=gluster_vg1
lvname=engine_lv
lvtype=thick
size=25GB
mount=/gluster/brick1
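# Thin pool that will back the data, export and iso bricks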
[lv2]
action=create
vgname=gluster_vg1
poolname=lvthinpool
lvtype=thinpool
poolmetadatasize=10MB
chunksize=1024k
size=30GB
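# Thin LVs for the data, export and iso bricks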
[lv3]
action=create
lvname=lv_data
poolname=lvthinpool
vgname=gluster_vg1
lvtype=thinlv
mount=/gluster/brick2
virtualsize=19GB
[lv4]
action=create
lvname=lv_export
poolname=lvthinpool
vgname=gluster_vg1
size=19GB
lvtype=thinlv
mount=/gluster/brick3
virtualsize=19GB
[lv5]
action=create
lvname=lv_iso
poolname=lvthinpool
vgname=gluster_vg1
size=19GB
lvtype=thinlv
mount=/gluster/brick4
virtualsize=19GB
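# Set the SELinux file contexts on the brick mount points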
[selinux]
yes
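# Stop and disable NetworkManager on the hosts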
[service3]
action=stop
service=NetworkManager
[service4]
action=disable
service=NetworkManager
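# Start glusterd (slice_setup=yes places the gluster processes in their own systemd slice)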
[service5]
action=start
service=glusterd
slice_setup=yes
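# Open the firewall ports needed by gluster, NFS, vdsm, libvirt and the VM consoles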
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp
services=glusterfs
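# Disable the gluster hook scripts that this setup does not need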
[script3]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-gluster-hooks.sh
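# Create the replica-3 (arbiter 1) gluster volumes and apply the virt group volume options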
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick1/engine
[volume2]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick2/data
[volume3]
action=create
volname=export
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick3/export
[volume4]
action=create
volname=iso
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick4/iso
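# Install the hosted-engine appliance image on the first host only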
[yum2:host1]
action=install
gpgcheck=no
packages=ovirt-engine-appliance
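# Uncomment the sections below to run the hosted-engine deployment from host1, supplying your own config and answer files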
#[shell3:host1]
#action=execute
#command=mkdir /home/tmp
#[shell4:host1]
#action=execute
#command=hosted-engine --deploy --config-append=<path-to-he-common.conf> --config-append=<path-to-he-answers.conf>
# [shell8]
# action=execute
# command=reboot