OVS with OSA for Ocata
@Tahvok, last active November 5, 2017

Example configuration for an OpenStack-Ansible (OSA) Ocata deployment using the Open vSwitch (OVS) ML2 driver: /etc/network/interfaces for two hosts, the commands that create the OVS provider bridge, and the matching OSA configuration files.

# /etc/network/interfaces, first host
#loopback
auto lo
iface lo inet loopback

auto eth2
iface eth2 inet manual
    bond-master bond0

auto eth3
iface eth3 inet manual
    bond-master bond1

auto eth4
iface eth4 inet manual
    bond-master bond0

auto eth5
iface eth5 inet manual
    bond-master bond1

### BOND
auto bond0
iface bond0 inet manual
    bond-mode 802.3ad
    bond-slaves none
    bond-miimon 100

auto bond1
iface bond1 inet manual
    bond-mode 802.3ad
    bond-slaves none
    bond-miimon 100

### VLAN
auto bond0.2
iface bond0.2 inet manual
    up ip link set dev $IFACE up
    down ip link set dev $IFACE down
    up ip link set $IFACE promisc on
    down ip link set $IFACE promisc off

auto bond0.5
iface bond0.5 inet manual
    vlan-raw-device bond0

auto bond0.3
iface bond0.3 inet manual
    vlan-raw-device bond0

auto bond1.7
iface bond1.7 inet manual
    vlan-raw-device bond1

### BRIDGE
auto br-mgmt
iface br-mgmt inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond0.5
    address 1.1.1.2
    netmask 255.255.254.0
    gateway 1.1.2.254

auto br-vxlan
iface br-vxlan inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond0.3
    address 2.2.2.3
    netmask 255.255.254.0

auto br-vlan
iface br-vlan inet manual
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond0

auto br-storage
iface br-storage inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond1.7
    address 3.3.3.4
    netmask 255.255.254.0
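
A quick sanity check after bringing the interfaces up (not part of the original gist; assumes the ifenslave package is installed and the switch side negotiated LACP):

# Each bond should report 802.3ad mode with both slaves active
cat /proc/net/bonding/bond0
cat /proc/net/bonding/bond1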

# /etc/network/interfaces, second host
#loopback
auto lo
iface lo inet loopback

auto eth2
iface eth2 inet manual
    bond-master bond0

auto eth3
iface eth3 inet manual
    bond-master bond1

auto eth4
iface eth4 inet manual
    bond-master bond0

auto eth5
iface eth5 inet manual
    bond-master bond1

### BOND
auto bond0
iface bond0 inet manual
    bond-mode 802.3ad
    bond-slaves none
    bond-miimon 100

auto bond1
iface bond1 inet manual
    bond-mode 802.3ad
    bond-slaves none
    bond-miimon 100

### VLAN
auto bond0.2
iface bond0.2 inet manual
    up ip link set dev $IFACE up
    down ip link set dev $IFACE down
    up ip link set $IFACE promisc on
    down ip link set $IFACE promisc off

auto bond0.5
iface bond0.5 inet manual
    vlan-raw-device bond0

auto bond1.7
iface bond1.7 inet manual
    vlan-raw-device bond1

auto bond1.3
iface bond1.3 inet manual
    vlan-raw-device bond1

### BRIDGE
auto br-mgmt
iface br-mgmt inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond0.5
    address 1.1.1.1
    netmask 255.255.254.0
    gateway 1.1.2.254

auto br-vxlan
iface br-vxlan inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond1.3
    address 2.2.2.2
    netmask 255.255.254.0

auto br-vlan
iface br-vlan inet manual
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond0.2

auto br-storage
iface br-storage inet static
    bridge_stp off
    bridge_waitport 0
    bridge_fd 0
    bridge_ports bond1.7
    address 3.3.3.3
    netmask 255.255.254.0
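
Likewise, the bridges and VLAN subinterfaces on either host can be verified with standard tools (illustrative commands, assuming bridge-utils is installed):

# Each Linux bridge should list its bond or VLAN subinterface as a port
brctl show
# Confirm the VLAN id and raw device of a subinterface, e.g.:
ip -d link show bond1.7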

Enter each neutron agent container and run the following:

ovs-vsctl add-br br-provider
ovs-vsctl add-port br-provider eth12

On each compute node, run the following:

ovs-vsctl add-br br-provider
ovs-vsctl add-port br-provider bond0.2
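
Note that eth12 inside the agent containers is the veth end that OSA wires up from the br-vlan provider network entry below (container_interface: "eth12"). The wiring can be confirmed in both places (verification commands added here, not in the original gist):

# br-provider should show exactly one port: eth12 (containers) or bond0.2 (computes)
ovs-vsctl list-ports br-provider
ovs-vsctl show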

# openstack_user_config.yml (OSA inventory and provider network definitions)
---
cidr_networks:
  container: 1.1.1.0/23
  tunnel: 2.2.2.0/23
  storage: 3.3.3.0/23

used_ips:
  - 1.1.1.5

global_overrides:
  # The internal and external VIP should be different IPs; however, they
  # do not need to be on separate networks.
  external_lb_vip_address: cloud.domain.com
  internal_lb_vip_address: 1.1.1.7
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
        gateway: "1.1.2.254"
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_openvswitch_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "br-vlan"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_openvswitch_agent
    # - network:
    #     container_bridge: "br-vlan"
    #     container_type: "veth"
    #     container_interface: "eth11"
    #     type: "vlan"
    #     range: "1:1"
    #     net_name: "vlan"
    #     group_binds:
    #       - neutron_openvswitch_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

# Deployment variable overrides (in OSA these typically live in user_variables.yml)
openstack_host_specific_kernel_modules:
  - name: "openvswitch"
    pattern: "CONFIG_OPENVSWITCH="
    group: "network_hosts"

neutron_plugin_type: ml2.ovs

neutron_ml2_drivers_type: "flat,vlan,vxlan"

neutron_provider_networks:
  network_flat_networks: "provider"
  network_types: "vxlan"
  network_vxlan_ranges: "1:1000"
  network_vlan_ranges: "provider"
  network_mappings: "provider:br-provider"

neutron_plugin_base:
  - router
  - metering
  - neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
  - neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin
  - dns

neutron_l2_population: True
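
Once the playbooks have run, the configuration above can be exercised with something like the following (illustrative network names; "provider" is the physical network defined by network_mappings, and the tenant VNI is allocated from network_vxlan_ranges):

# Flat external network carried over the br-provider mapping
openstack network create --external --provider-network-type flat \
  --provider-physical-network provider ext-net
# Tenant network; ML2 allocates a VXLAN segment per network_types
openstack network create self-service-net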