@ekarlso
Created June 23, 2017 08:05
ovirt-gluster.conf
# A sample configuration file to setup ROBO, based on https://github.com/gluster/gdeploy/blob/master/examples/hc.conf
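# Run from the deployment node with: gdeploy -c ovirt-gluster.conf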
[hosts]
ovirt0
ovirt2
ovirt3
# Do a sanity check before proceeding
[script1]
action=execute
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh
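# Add the oVirt 4.1 release repository, then install the virtualization and Gluster packages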
[yum0]
action=install
packages=http://resources.ovirt.org/pub/yum-repo/ovirt-release41.rpm
[yum1]
action=install
gpgcheck=yes
packages=vdsm,vdsm-gluster,ovirt-hosted-engine-setup,screen,ntp
update=yes
# Setup ntp on the servers before any other operations are done
# Disable the existing public servers
[shell1]
action=execute
command=sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf
# Add custom NTP server
[update-file1]
action=add
dest=/etc/ntp.conf
line=server clock.redhat.com iburst
[service1]
action=enable
service=ntpd
[service2]
action=restart
service=ntpd
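# Force VDSM to (re)write its configuration on every host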
[shell2]
action=execute
command=vdsm-tool configure --force
# Disable multipath
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-multipath.sh
# Edit to match your environment; the sections below assume six
# devices dedicated to Gluster, /dev/sdb through /dev/sdg
[pv]
action=create
devices=sdb,sdc,sdd,sde,sdf,sdg
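# One volume group per physical disk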
[vg1]
action=create
vgname=vg.gluster1
pvname=sdb
[vg2]
action=create
vgname=vg.gluster2
pvname=sdc
[vg3]
action=create
vgname=vg.gluster3
pvname=sdd
[vg4]
action=create
vgname=vg.gluster4
pvname=sde
[vg5]
action=create
vgname=vg.gluster5
pvname=sdf
[vg6]
action=create
vgname=vg.gluster6
pvname=sdg
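# Thick LV for the hosted-engine storage domain brick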
[lv1]
action=create
vgname=vg.gluster1
lvname=lv.engine
lvtype=thick
size=50GB
mount=/gluster/brick1
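# Thick LV for the ISO domain brick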
[lv2]
action=create
#poolname=pool.iso
vgname=vg.gluster2
lvname=lv_iso
lvtype=thick
size=50GB
mount=/gluster/brick2
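# Thin pool that backs the thinly provisioned data bricks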
[lv3]
action=create
vgname=vg.gluster3
poolname=pool.default
lvtype=thinpool
poolmetadatasize=10MB
chunksize=1024k
size=50GB
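# Thinly provisioned data bricks, one per volume group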
[lv4]
action=create
vgname=vg.gluster1
poolname=pool.default
lvname=lv.brick1
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick3
[lv5]
action=create
vgname=vg.gluster2
poolname=pool.default
lvname=lv.brick2
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick4
[lv6]
action=create
vgname=vg.gluster3
poolname=pool.default
lvname=lv.brick3
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick5
[lv7]
action=create
vgname=vg.gluster4
poolname=pool.default
lvname=lv.brick4
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick6
[lv8]
action=create
vgname=vg.gluster5
poolname=pool.default
lvname=lv.brick5
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick7
[lv9]
action=create
vgname=vg.gluster6
poolname=pool.default
lvname=lv.brick6
lvtype=thinlv
size=1950GB
virtualsize=1950GB
mount=/gluster/brick8
[selinux]
no
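# Stop and disable NetworkManager on the hosts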
[service3]
action=stop
service=NetworkManager
[service4]
action=disable
service=NetworkManager
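# Start glusterd; slice_setup=yes places the Gluster daemons in their own systemd slice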
[service5]
action=start
service=glusterd
slice_setup=yes
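# Open the firewall ports used by Gluster, VDSM (54321), libvirt (16514),
# NRPE (5666) and guest consoles (5900-6923)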
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp
services=glusterfs
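# Disable the Gluster hook scripts that are not needed on oVirt hosts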
[script3]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
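# Replica-3 volume (with arbiter) for the hosted-engine storage domain, tuned with the usual virt options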
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick1/engine
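# Replica-3 volume (with arbiter) for the ISO domain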
[volume2]
action=create
volname=iso
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick2/iso
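# Replica-3 volume (with arbiter) for the VM data domain, spread across the six data bricks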
[volume3]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,on,512MB,32,full,granular,10000,8,30,off,on,off,on
brick_dirs=/gluster/brick3/data,/gluster/brick4/data,/gluster/brick5/data,/gluster/brick6/data,/gluster/brick7/data,/gluster/brick8/data
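# Install the hosted-engine appliance on the first host only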
[yum2:host1]
action=install
gpgcheck=no
packages=ovirt-engine-appliance
#[shell3:host1]
#action=execute
#command=mkdir /home/tmp
#[shell4:host1]
#action=execute
#command=hosted-engine --deploy --config-append=<path-to-he-common.conf> --config-append=<path-to-he-answers.conf>
# [shell8]
# action=execute
# command=reboot