ovirt-gluster.conf
# A sample configuration file to set up a converged ovirt/gluster install
# using gdeploy: https://github.com/gluster/gdeploy
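# To apply this file (a minimal sketch; assumes gdeploy is already installed
# on the machine you deploy from):
#   gdeploy -c ovirt-gluster.conf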
# edit hosts to match your environment
[hosts]
host1
host2
host3
[yum0]
action=install
packages=http://resources.ovirt.org/pub/yum-repo/ovirt-release40.rpm
[yum1]
action=install
gpgcheck=yes
packages=vdsm,vdsm-gluster,ovirt-hosted-engine-setup,screen,firewalld
update=yes
# Set up NTP on the servers before any other operations are done
# Disable the existing public servers
[shell1]
action=execute
command=sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf
# Add custom NTP server
[update-file1]
action=add
dest=/etc/ntp.conf
line=server 0.rhel.pool.ntp.org iburst
[service1]
action=enable
service=ntpd
[service2]
action=restart
service=ntpd
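# The NTP steps above roughly amount to running this on each host
# (a sketch for orientation, not part of the config):
#   sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf
#   echo 'server 0.rhel.pool.ntp.org iburst' >> /etc/ntp.conf
#   systemctl enable ntpd && systemctl restart ntpd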
[shell2]
action=execute
command=vdsm-tool configure --force
[service3]
action=start
service=vdsmd
# Disable multipath
[script1]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-multipath.sh
# Edit to match your environment, below assumes a single
# device for gluster at /dev/vdb
[pv]
action=create
devices=vdb
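# If you have more than one disk to dedicate to gluster, a comma-separated
# list (e.g. devices=vdb,vdc, hypothetical names) should also work here;
# check the gdeploy docs for your version.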
[vg1]
action=create
vgname=gluster
pvname=vdb
[lv1]
action=create
vgname=centos
lvname=engine
lvtype=thick
size=25GB
mount=/gluster/brick1
[lv2]
action=create
vgname=centos
poolname=lvthinpool
lvtype=thinpool
poolmetadatasize=20MB
chunksize=1024k
size=70GB
[lv3]
action=create
lvname=data
poolname=lvthinpool
vgname=centos
lvtype=thinlv
mount=/gluster/brick2
virtualsize=50GB
[lv4]
action=create
lvname=export
poolname=lvthinpool
vgname=centos
lvtype=thinlv
mount=/gluster/brick3
virtualsize=50GB
[lv5]
action=create
lvname=iso
poolname=lvthinpool
vgname=centos
lvtype=thinlv
mount=/gluster/brick4
virtualsize=50GB
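# Resulting layout on each host, as configured above: one 25GB thick LV for
# the engine volume mounted at /gluster/brick1, plus a 70GB thin pool carved
# into three 50GB thin LVs (data, export, iso) mounted at /gluster/brick2,
# /gluster/brick3 and /gluster/brick4. The thin LVs are overcommitted against
# the pool, so actual usage has to stay under 70GB in total.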
[selinux]
yes
[service4]
action=stop
service=NetworkManager
[service5]
action=disable
service=NetworkManager
[shell3]
action=execute
command=mkdir /etc/systemd/system/glusterd.service.d
[shell4]
action=execute
command=echo -e "[Service]\nCPUAccounting=yes\nSlice=glusterfs.slice" >> /etc/systemd/system/glusterd.service.d/99-cpu.conf
[shell5]
action=execute
command=echo -e "[Slice]\nCPUQuota=400%" >> /etc/systemd/system/glusterfs.slice,systemctl daemon-reload
[service6]
action=restart
service=glusterd
[service7]
action=start
service=firewalld
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp
services=glusterfs
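# Port notes, for reference (verify against your oVirt/gluster versions):
# 111 rpcbind, 2049 NFS, 54321 vdsmd, 5900-6923 SPICE/VNC consoles,
# 5666 NRPE monitoring, 16514 libvirt TLS. The glusterfs firewalld service
# entry covers the gluster daemon and brick ports.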
[script2]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-gluster-hooks.sh
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/gluster/brick1/engine
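# For orientation only: gdeploy turns the volume1 section into roughly the
# following gluster commands, using the hosts from the [hosts] section
# (a hedged sketch, not part of this config):
#   gluster volume create engine replica 3 arbiter 1 transport tcp \
#     host1:/gluster/brick1/engine host2:/gluster/brick1/engine host3:/gluster/brick1/engine
#   gluster volume set engine group virt
#   gluster volume set engine storage.owner-uid 36   # ...and so on for each key/value pair
#   gluster volume start engine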
[volume2]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/gluster/brick2/data
[volume3]
action=create
volname=export
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/gluster/brick3/export
[volume4]
action=create
volname=iso
transport=tcp
replica=yes
replica_count=3
arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/gluster/brick4/iso
# modify to match your host1
[yum2:host1]
action=install
gpgcheck=no
packages=ovirt-engine-appliance
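# ovirt-engine-appliance is only needed on the host that will later run
# hosted-engine --deploy, hence the host1-only section above (adjust the
# host name to whichever node you deploy the engine from).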
Try getting rid of the [pv] section. That's for taking a disk device, in my example /dev/vdb, and making it into a physical volume for LVM. Then you can put volume groups on that, and then logical volumes in those. It looks like you have one disk, vda, and it's split between a boot partition and a big pv, and there's already a vg using all the space in that pv. So we don't create a new pv, and we don't create a new vg. We put our gluster LVs in the existing centos vg.
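A minimal sketch of what that looks like against the config above (assuming the existing vg really is named centos and has enough free space for the bricks): delete the [pv] and [vg1] sections and leave the [lv1] through [lv5] sections as they are, since they already point at the centos vg, e.g.

[lv1]
action=create
vgname=centos
lvname=engine
lvtype=thick
size=25GB
mount=/gluster/brick1

Nothing else in the config needs to change for that.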