OVS with OSA for Pike: Open vSwitch networking for an OpenStack-Ansible (Pike) deployment.

Enter each neutron agent container and run the following:
ovs-vsctl add-br br-provider
ovs-vsctl add-port br-provider eth12
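With the OSA dynamic inventory in place on the deployment host, the same thing can be pushed to all agent containers at once with an ad-hoc Ansible call (a sketch; neutron_agents_container is the inventory group used by the playbook further down, and --may-exist just makes the commands idempotent):

ansible neutron_agents_container -m shell -a "ovs-vsctl --may-exist add-br br-provider && ovs-vsctl --may-exist add-port br-provider eth12"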
On each compute node, run the following:
ovs-vsctl add-br br-provider
ovs-vsctl add-port br-provider br-vlan
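The compute-side commands can be pushed the same way (compute_hosts is the group name from the config below):

ansible compute_hosts -m shell -a "ovs-vsctl --may-exist add-br br-provider && ovs-vsctl --may-exist add-port br-provider br-vlan"

Either way, verify the result before moving on; list-ports should show eth12 in the containers and br-vlan on the computes:

ovs-vsctl show
ovs-vsctl list-ports br-provider

An example /etc/network/interfaces for the hosts: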
auto lo
iface lo inet loopback
    dns-nameservers 172.31.7.243
    dns-search maas

auto eno1
iface eno1 inet manual
    mtu 1500

auto eno2
iface eno2 inet manual
    mtu 1500

auto ens15
iface ens15 inet manual
    bond-master bond0
    mtu 9000

auto ens15d1
iface ens15d1 inet manual
    bond-master bond0
    mtu 9000

auto bond0
iface bond0 inet manual
    bond-lacp-rate 1
    mtu 9000
    bond-mode 802.3ad
    bond-miimon 100
    bond-slaves none
    bond-xmit_hash_policy layer3+4

auto bond0.100
iface bond0.100 inet manual
    vlan-raw-device bond0

auto br-mgmt
iface br-mgmt inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    address 10.20.20.10/24
    gateway 10.20.20.2
    bridge_ports eno2

auto br-storage
iface br-storage inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    address 192.168.1.10
    netmask 255.255.255.0
    mtu 9000
    bridge_ports bond0

auto br-vlan
iface br-vlan inet manual
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    mtu 1500
    bridge_ports eno1

auto br-vxlan
iface br-vxlan inet static
    address 10.30.30.10
    netmask 255.255.255.0
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    mtu 9000
    bridge_ports bond0.100

source /etc/network/interfaces.d/*.cfg
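Once the interfaces are up, the bond, the VLAN sub-interface and the bridges can be sanity-checked with standard tools:

cat /proc/net/bonding/bond0
ip -d link show bond0.100
brctl show

/proc/net/bonding/bond0 should report 802.3ad mode with both ens15 and ens15d1 as active slaves. The OSA inventory configuration, normally /etc/openstack_deploy/openstack_user_config.yml, follows: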
---
cidr_networks:
  container: 10.20.20.0/24
  tunnel: 10.30.30.0/24
  storage: 192.168.1.0/24

used_ips:
  - "10.20.20.1,10.20.20.50"
  - "10.20.20.100,10.20.20.150"
  - "10.30.30.1,10.30.30.50"
  - "10.30.30.100,10.30.30.150"
  - "192.168.1.1,192.168.1.50"
  - "192.168.1.100,192.168.1.150"
global_overrides:
  internal_lb_vip_address: 10.20.20.50
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints, the two addresses must be different.
  #
  external_lb_vip_address: openstack.mydc.net
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_openvswitch_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        type: "vlan"
        range: "10:1000"
        net_name: "physnet1"
        group_binds:
          - neutron_openvswitch_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - mons
###
### Infrastructure
###
# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# rsyslog server
log_hosts:
  infra4:
    ip: 10.20.20.13
###
### OpenStack
###
# keystone
identity_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12
# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 10.20.20.10
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
  infra2:
    ip: 10.20.20.11
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
  infra3:
    ip: 10.20.20.12
    # container_vars:
    #   limit_container_types: glance
    #   glance_nfs_client:
    #     - server: "172.29.244.15"
    #       remote_path: "/images"
    #       local_path: "/var/lib/glance/images"
    #       type: "nfs"
    #       options: "_netdev,auto"
# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# heat
orchestration_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# horizon
dashboard_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# ceilometer (telemetry data collection)
metering-infra_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12

# nova hypervisors
compute_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12
  infra4:
    ip: 10.20.20.13

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts:
  infra1:
    ip: 10.20.20.10
  infra2:
    ip: 10.20.20.11
  infra3:
    ip: 10.20.20.12
  infra4:
    ip: 10.20.20.13
# cinder volume hosts (Ceph RBD-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  stor1:
    ip: 192.168.1.100
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor2:
    ip: 192.168.1.101
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor3:
    ip: 192.168.1.102
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
  stor4:
    ip: 192.168.1.103
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        rbd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: rbd
          rbd_pool: cinder-volumes
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
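After editing openstack_user_config.yml, the container plumbing is applied with the stock OSA plays, e.g.:

cd /opt/openstack-ansible/playbooks
openstack-ansible setup-hosts.yml

setup-hosts.yml (re)creates the containers and attaches the eth1/eth2/eth10/eth12 interfaces declared under provider_networks. The small playbook below automates the br-provider setup from the top of this gist: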
---
- name: Setup OVS bridges
  hosts: neutron_agents_container
  user: root
  tasks:
    - name: Setup br-provider
      openvswitch_bridge:
        bridge: br-provider
        state: present
      notify:
        - Restart neutron-openvswitch-agent
    - name: Add port to br-provider
      openvswitch_port:
        bridge: br-provider
        port: "eth12"
        state: present
      notify:
        - Restart neutron-openvswitch-agent
  handlers:
    # Restart the OVS agent so it picks up the new bridge and port
    - name: Restart neutron-openvswitch-agent
      service:
        name: neutron-openvswitch-agent
        state: restarted
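A minimal way to run it, assuming it is saved on the deployment host as, say, ovs-bridges.yml (the filename is arbitrary):

openstack-ansible ovs-bridges.yml

Finally, the overrides for /etc/openstack_deploy/user_variables.yml: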
---
debug: true
pip_validate_certs: false

openstack_host_specific_kernel_modules:
  - name: "openvswitch"
    pattern: "CONFIG_OPENVSWITCH"
    group: "network_hosts"

glance_ceph_client: images
glance_default_store: rbd
glance_rbd_store_pool: images
glance_rbd_store_chunk_size: 8
nova_libvirt_images_rbd_pool: vms
dhcp_domain: openstack.net
cinder_ceph_client: cinder
cephx: true
ceph_mons:
  - 192.168.1.100
  - 192.168.1.101
  - 192.168.1.102
rabbitmq_monitoring_userid: monitoring
ssh_delay: 30

haproxy_use_keepalived: True
haproxy_keepalived_internal_vip_cidr: "{{ internal_lb_vip_address }}/32"
haproxy_keepalived_internal_interface: br-mgmt
openstack_service_publicuri_proto: http
openstack_external_ssl: false
haproxy_ssl: false

neutron_plugin_base:
  - router
  - metering
  - neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
  - neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin
  - dns

neutron_plugin_type: ml2.ovs.dvr
neutron_ml2_drivers_type: "flat,vlan,vxlan"
neutron_l2_population: True
neutron_vxlan_enabled: True
neutron_vxlan_group: "239.1.1.1"

neutron_provider_networks:
  network_flat_networks: "*"
  network_types: "vxlan"
  network_vlan_ranges: "physnet1:10:1000"
  network_mappings: "physnet1:br-provider"
  network_vxlan_ranges: "1:1000"
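These overrides take effect the next time the relevant plays run; for neutron alone that is, e.g.:

cd /opt/openstack-ansible/playbooks
openstack-ansible os-neutron-install.yml

(os-neutron-install.yml is the stock OSA neutron play; setup-openstack.yml re-applies all of the OpenStack services.)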
danilochilene commented Nov 14, 2017:

For fast deploys, the following serial overrides make each play run against 100% of its hosts in a single batch instead of the default rolling batches:
cinder_backend_serial: "100%"
cinder_api_serial: "100%"
glance_api_serial: "100%"
glance_registry_serial: "100%"
neutron_server_serial: "100%"
neutron_agent_serial: "100%"
neutron_other_serial: "100%"
nova_conductor_serial: "100%"
nova_scheduler_serial: "100%"
nova_api_serial: "100%"
nova_console_serial: "100%"
nova_compute_serial: "100%"
nova_serial: "100%"
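These *_serial overrides also go in /etc/openstack_deploy/user_variables.yml. Running every play in one batch is faster but gives up the rolling-update safety net, so it is best reserved for greenfield deploys, e.g.:

openstack-ansible setup-openstack.yml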
