#cloud-config
---
write_files:
  - path: /opt/bin/waiter.sh
    owner: root
    content: |
      #!/usr/bin/bash
      until curl http://127.0.0.1:2379/v2/machines; do sleep 2; done
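# The waiter script blocks until the local etcd2 client endpoint responds, so units
# ordered After=etcd-waiter.service only run once the cluster is reachable. A rough
# manual equivalent, assuming the etcd2 version of etcdctl is on the PATH:
#   etcdctl cluster-health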
coreos:
  etcd2:
    discovery: https://discovery.etcd.io/ca240c8274e7535e4ca70b0a9a6d3924
    advertise-client-urls: http://$public_ipv4:2379
    initial-advertise-peer-urls: http://$private_ipv4:2380
    listen-client-urls: http://0.0.0.0:2379
    listen-peer-urls: http://$private_ipv4:2380
  fleet:
    public-ip: $public_ipv4
  flannel:
    interface: $public_ipv4
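  # $public_ipv4 and $private_ipv4 are substituted by coreos-cloudinit at boot, and the
  # discovery URL is a single-use token tied to one cluster. A fresh token can be requested
  # from the public discovery service (size=3 is an assumed cluster size, adjust to match):
  #   curl -w "\n" "https://discovery.etcd.io/new?size=3"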
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
    - name: etcd-waiter.service
      command: start
      content: |
        [Unit]
        Wants=network-online.target
        Wants=etcd2.service
        After=etcd2.service
        After=network-online.target
        Before=flannel.service
        Before=docker.service
        Before=setup-network-environment.service
        Before=ceph-etcd-config.service

        [Service]
        ExecStartPre=/usr/bin/chmod +x /opt/bin/waiter.sh
        ExecStart=/usr/bin/bash /opt/bin/waiter.sh
        RemainAfterExit=true
        Type=oneshot
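    # etcd-waiter is a oneshot gate: RemainAfterExit keeps the unit active after the
    # script exits, so the Before=/After= ordering above holds for the rest of boot.
    # On a booted node it can be checked with, for example:
    #   systemctl status etcd-waiter.service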
    - name: ceph-etcd-config.service
      command: start
      content: |
        [Unit]
        Description=Ceph Etcd Configuration
        After=etcd-waiter.service

        [Service]
        EnvironmentFile=/etc/environment
        RemainAfterExit=yes
        Type=oneshot
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/auth/cephx true
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/auth/cephx_require_signatures false
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/auth/cephx_cluster_require_signatures true
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/auth/cephx_service_require_signatures false
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/max_open_files 131072
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/osd_pool_default_pg_num 128
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/osd_pool_default_pgp_num 128
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/osd_pool_default_size 3
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/osd_pool_default_min_size 1
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/mon_osd_full_ratio .95
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/global/mon_osd_nearfull_ratio .85
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mon/mon_osd_down_out_interval 600
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mon/mon_osd_min_down_reporters 4
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mon/mon_clock_drift_allowed .15
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mon/mon_clock_drift_warn_backoff 30
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mon/mon_osd_report_timeout 300
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/journal_size 100
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/cluster_network 192.168.42.0/24
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/public_network 192.168.42.0/24
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_mkfs_type xfs
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_mkfs_options_xfs "\-f \-i size=2048"
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_mon_heartbeat_interval 30
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/pool_default_crush_rule 0
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_crush_update_on_start true
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_objectstore filestore
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/filestore_merge_threshold 40
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/filestore_split_multiple 8
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_op_threads 8
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/filestore_op_threads 8
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/filestore_max_sync_interval 5
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_max_scrubs 1
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_recovery_max_active 5
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_max_backfills 2
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_recovery_op_priority 2
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_client_op_priority 63
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_recovery_max_chunk 1048576
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/osd_recovery_threads 1
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/ms_bind_port_min 6800
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/osd/ms_bind_port_max 7100
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/client/rbd_cache_enabled true
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/client/rbd_cache_writethrough_until_flush true
        ExecStart=/usr/bin/etcdctl set ceph-config/ceph/mds/mds_cache_size 100000
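    # The etcdctl calls above seed the ceph-config/ key tree that the ceph/daemon image
    # reads when launched with KV_TYPE=etcd. Individual values can be inspected later,
    # e.g. with etcdctl v2 syntax:
    #   etcdctl get ceph-config/ceph/osd/osd_objectstore
    #   etcdctl ls --recursive ceph-config/ceph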
    - name: ceph-mon.service
      command: start
      content: |
        [Unit]
        Description=Ceph Monitor
        After=docker.service
        After=ceph-etcd-config.service

        [Service]
        EnvironmentFile=/etc/environment
        ExecStartPre=/usr/bin/docker pull ceph/daemon
        ExecStartPre=-/usr/bin/docker kill ceph-mon
        ExecStartPre=-/usr/bin/docker rm ceph-mon
        ExecStart=/usr/bin/docker run --name ceph-mon --net=host -v /Users/msull92/var/lib/ceph/:/var/lib/ceph/ -e MON_IP=${COREOS_PUBLIC_IPV4} -e CEPH_PUBLIC_NETWORK=172.17.8.0/24 -e KV_TYPE=etcd -e KV_IP=${COREOS_PUBLIC_IPV4} -e KV_PORT=2379 ceph/daemon mon
        ExecStop=-/usr/bin/docker stop ceph-mon

        [X-Fleet]
        Global=true
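    # The monitor is a fleet Global unit, so every node runs one ceph-mon container.
    # MON_IP, CEPH_PUBLIC_NETWORK (172.17.8.0/24) and the bind-mounted host path are
    # specific to the original environment. Once a monitor is up, cluster state can be
    # checked from inside the container (container name as defined above):
    #   docker exec ceph-mon ceph -s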
    - name: ceph-osd.service
      command: start
      content: |
        [Unit]
        Description=Ceph Object Storage Daemon
        After=docker.service
        After=ceph-etcd-config.service

        [Service]
        EnvironmentFile=/etc/environment
        ExecStartPre=/usr/bin/docker pull ceph/daemon
        ExecStartPre=-/usr/bin/docker kill ceph-osd
        ExecStartPre=-/usr/bin/docker rm ceph-osd
        ExecStart=/usr/bin/docker run --name ceph-osd --net=host --privileged=true -v /Users/msull92/var/lib/ceph/:/var/lib/ceph/ -v /dev/:/dev/ -e OSD_DEVICE=/dev/vdd -e OSD_FORCE_ZAP=1 -e MON_IP=${COREOS_PUBLIC_IPV4} -e CEPH_PUBLIC_NETWORK=172.17.8.0/24 -e KV_TYPE=etcd -e KV_IP=${COREOS_PUBLIC_IPV4} -e KV_PORT=2379 ceph/daemon osd
        ExecStop=-/usr/bin/docker stop ceph-osd

        [X-Fleet]
        Global=true
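    # Each node also runs one OSD container backed by /dev/vdd; OSD_FORCE_ZAP=1 lets the
    # ceph/daemon entrypoint re-initialize that device, so OSD_DEVICE must point at the
    # actual data disk. To confirm OSDs have joined the cluster, assuming the ceph-mon
    # container above is running on the same node:
    #   docker exec ceph-mon ceph osd tree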