Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save alandosreis/c13719f1c8ec5f7483892da6288c590b to your computer and use it in GitHub Desktop.
---
# Networks from which container/service IPs are assigned.
cidr_networks:
  container: 10.20.20.0/24
  tunnel: 10.30.30.0/24
  storage: 192.168.1.0/24

# Ranges (inclusive "start,end" pairs) excluded from automatic
# assignment — physical hosts, VIPs, and gateways live here.
used_ips:
  - "10.20.20.1,10.20.20.50"
  - "10.20.20.100,10.20.20.150"
  - "10.30.30.1,10.30.30.50"
  - "10.30.30.100,10.30.30.150"
  - "192.168.1.1,192.168.1.50"
  # was "192.168.1.100,192.168.11.50" — 192.168.11.0/24 is not a declared
  # CIDR; corrected to match the x.100–x.150 pattern of the other ranges
  - "192.168.1.100,192.168.1.150"
global_overrides:
  internal_lb_vip_address: 10.20.20.50
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: vip.openstack.net
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  # provider_networks must be nested under global_overrides; each entry
  # maps a host bridge into the containers bound to its group_binds.
  provider_networks:
    # Management network: carries SSH/ansible and container addresses.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    # Overlay (VXLAN) network for tenant traffic.
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Flat provider network bound directly to physical NIC eno1.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eno1"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    # VLAN provider network (single tag, range "1:1").
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Storage network shared by glance/cinder/nova and Ceph mons.
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - mons
###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# rsyslog server
log_hosts:
  infra4:
    ip: 10.20.20.13
###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 10.20.20.10
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
  infra2:
    ip: 10.20.20.11
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
  infra3:
    ip: 10.20.20.12
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# heat
orchestration_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# horizon
dashboard_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# ceilometer (telemetry data collection)
metering-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# nova hypervisors
# NOTE(review): infra1-3 also run the control plane (converged layout);
# confirm this is intentional rather than a copy/paste of the infra list.
compute_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12
  infra4:
    ip: 10.20.20.13

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12
  infra4:
    ip: 10.20.20.13
# cinder volume hosts (Ceph RBD-backed)
# (comment previously said "NFS-backed", but every backend below uses
# cinder.volume.drivers.rbd.RBDDriver, i.e. Ceph RBD)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
# NOTE(review): the stor* "ip" entries sit on the storage CIDR
# (192.168.1.0/24) rather than the management network — confirm these
# hosts are reachable by ansible at those addresses.
storage_hosts:
  stor1:
    ip: 192.168.1.100
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor2:
    ip: 192.168.1.101
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor3:
    ip: 192.168.1.102
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor4:
    ip: 192.168.1.103
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment