Skip to content

Instantly share code, notes, and snippets.

@sachinkagarwal
Forked from zachfi/fabfile.py
Created January 25, 2016 16:32
Show Gist options
  • Save sachinkagarwal/88cf955a89283b5c7a9a to your computer and use it in GitHub Desktop.
Ceph fabfile.py
#!/usr/bin/env python
"""Fabric deployment script for a small Ceph cluster (admin/mon/mds/osd roles)."""
# BUG FIX: the original shebang was "#! /usr/bin/env" with no interpreter
# argument, so direct execution of this file would fail.
from fabric.api import *

# SSH private key Fabric uses for every remote connection.
env.key_filename = '~/.ssh/id_rsa'
# Map each OSD host to the data directories backing its OSD daemons.
# node1 carries osd00-osd03, node2 carries osd10-osd11.
osd = {
    'node1.l.n3kl.cx': ['/var/local/osd0%d' % n for n in range(4)],
    'node2.l.n3kl.cx': ['/var/local/osd1%d' % n for n in range(2)],
}
# Role definitions consumed by the @roles(...) decorators below.
# node1 doubles as admin, monitor, and metadata server; every key of
# `osd` is an OSD host.
env.roledefs = {
    'admin': ['node1.l.n3kl.cx'],
    'mon': ['node1.l.n3kl.cx'],
    'mds': ['node1.l.n3kl.cx'],
    # Materialize the keys: under Python 3, dict.keys() is a live view
    # object, while the " ".join(env.roledefs['osd']) callers below (and
    # Fabric itself) expect a plain list of host strings.
    'osd': list(osd),
}
@task
@roles('osd')
def prepare():
    """Bootstrap every OSD host: base packages plus a `ceph` deploy user."""
    sudo('apt-get update')
    sudo('apt-get -qy install less curl sudo lsb-release')
    # useradd fails if the user already exists; warn instead of aborting
    # so the task is re-runnable.
    with settings(warn_only=True):
        sudo('useradd -d /home/ceph -m ceph')
    # NOTE(review): hard-coded plaintext password piped into passwd — it
    # is visible in shell history and process listings; consider chpasswd
    # via stdin or key-only access instead.
    run('echo -e "asdlkjasd\nasdlkjasd\n" | sudo passwd ceph')
    # Passwordless sudo for the ceph user, as ceph-deploy requires.
    run('echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph')
    # sudoers.d fragments must be mode 0440 or sudo refuses to read them.
    sudo('chmod 0440 /etc/sudoers.d/ceph')
@roles('admin')
def preflight():
    """Install ceph-deploy on the admin node from the upstream Ceph repo."""
    repo_setup = [
        # Trust the Ceph release signing key.
        'wget -q -O- "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc" | sudo apt-key add -',
        # Add the firefly apt repository for this distro's codename.
        'echo deb http://ceph.com/debian-firefly/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list',
        # Refresh indexes and pull in the deployment tool.
        'apt-get update && apt-get -y install ceph-deploy',
    ]
    for cmd in repo_setup:
        sudo(cmd)
@roles('admin')
def deploy_admin_ssh_config():
    """Install an SSH config on the admin node and mint a fresh keypair."""
    # Assumes a local `ssh_config` file next to this fabfile — TODO confirm.
    put('ssh_config', '.ssh/config')
    # Old keys may not exist yet; don't abort on rm failure.
    with settings(warn_only=True):
        run('rm .ssh/id_rsa*')
    # Passphrase-less key so ceph-deploy can hop between nodes unattended.
    run('ssh-keygen -f .ssh/id_rsa -N ""')
    # Fetch the public key locally so copy_key() can push it to OSD hosts.
    get('.ssh/id_rsa.pub','id_rsa.pub')
@roles('osd')
def copy_key():
    """Authorize the admin node's new SSH key for the ceph user on each OSD host."""
    # Run as root so /home/ceph is writable regardless of the login user.
    with settings(user='root'):
        # Directory and ownership may already be in place from a prior
        # run; warn instead of aborting.
        with settings(warn_only=True):
            run('mkdir -m 0700 /home/ceph/.ssh')
            run('chown ceph:ceph /home/ceph/.ssh')
        # Overwrites authorized_keys with the single key fetched by
        # deploy_admin_ssh_config().
        put('id_rsa.pub', '/home/ceph/.ssh/authorized_keys')
@roles('admin')
def new_cluster():
    """Create the ceph-deploy working directory and seed the initial config."""
    # Directory may survive from an earlier attempt; warn, don't abort.
    with settings(warn_only=True):
        run('mkdir cluster')
    with cd('cluster'):
        run('ceph-deploy new ' + env.roledefs['mon'][0])
        # BUG FIX: the replication option must live under the [global]
        # section — "[default]" is not a section Ceph reads, so the
        # 2-replica setting was silently ignored (which is why finish()
        # has to set pool sizes by hand as a workaround).
        run('echo "[global]" >> ceph.conf')
        run('echo "osd pool default size = 2" >> ceph.conf')
@roles('admin')
def ceph_deploy_install():
    """Install Ceph packages on every cluster host, then bring up the monitor."""
    targets = " ".join(env.roledefs['osd'])
    run('ceph-deploy install ' + targets)
    mon_host = env.roledefs['mon'][0]
    with cd('cluster'):
        # 'mon create-initial' would also gather keys, but the explicit
        # two-step flow below is what this deployment uses.
        run('ceph-deploy mon create ' + mon_host)
        run('ceph-deploy gatherkeys ' + mon_host)
@roles('admin')
def setup_osd():
    """Prepare and activate one OSD per data directory listed in `osd`."""
    with cd('cluster'):
        for host, devices in osd.items():
            for device in devices:
                # BUG FIX: create the backing directory once per device.
                # The original ran this inside the prepare/activate loop,
                # attempting every mkdir twice (the second always failed,
                # masked by warn_only).  warn_only: the dir may already exist.
                # NOTE(review): this mkdir executes on the admin host, not
                # on `host` — it works for node1 only because node1 is both
                # admin and OSD; confirm how node2's directories get created.
                with settings(warn_only=True):
                    sudo('mkdir ' + device)
                for action in ['prepare', 'activate']:
                    cmd = ['ceph-deploy', 'osd', action, host + ':' + device]
                    run(" ".join(cmd))
@roles('admin')
def setup_mds():
    """Deploy a metadata server on the designated MDS host."""
    mds_host = env.roledefs['mds'][0]
    with cd('cluster'):
        run('ceph-deploy mds create ' + mds_host)
@roles('admin')
def finish():
    """Distribute admin credentials and force 2-replica pools."""
    with cd('cluster'):
        # Push the admin keyring/config to every OSD host.
        run('ceph-deploy admin ' + " ".join(env.roledefs['osd']))
        sudo('chmod +r /etc/ceph/ceph.client.admin.keyring')
        # Here because something in the conf is not allowing only two
        # osds to be a complete cluster.
        run('ceph osd pool set data size 2')
        run('ceph osd pool set rbd size 2 && ceph osd pool set metadata size 2')
@task
def cluster():
    """Build the whole cluster end to end (run prepare() on the OSD hosts first)."""
    steps = (
        deploy_admin_ssh_config,
        copy_key,
        new_cluster,
        ceph_deploy_install,
        setup_osd,
        finish,
        setup_mds,
    )
    for step in steps:
        execute(step)
@roles('admin','osd','mon')
def destroy_common():
    """Stop Ceph and wipe OSD data on every host (best effort, as root)."""
    # warn_only: ceph may not be running and the data dirs may be empty.
    with settings(warn_only=True,user='root'):
        run('/etc/init.d/ceph stop')
        run('rm -rf /var/local/osd*/*')
@roles('admin')
def destroy_admin():
    """Purge Ceph from every OSD host and remove local deploy state."""
    # Everything is best-effort: hosts or files may already be gone.
    with settings(warn_only=True):
        for host in env.roledefs['osd']:
            run('ceph-deploy purge ' + host)
            run('ceph-deploy purgedata ' + host)
        run('ceph-deploy forgetkeys')
        run('rm -rf cluster')
        run('rm .cephdeploy.conf')
@task
def destroy():
    """Tear the cluster down: remote cleanup first, then admin-side purge."""
    for phase in (destroy_common, destroy_admin):
        execute(phase)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment