@a1git · Created March 3, 2017 14:36
$ openstack-ansible os-magnum-install.yml -vvvv
Variable files: "-e @/etc/openstack_deploy/user_secrets.yml -e @/etc/openstack_deploy/user_variables_default.yml -e @/etc/openstack_deploy/user_variables.yml "
No config file found; using defaults
ERROR! Attempted to execute "/opt/openstack-ansible/playbooks/inventory/dynamic_inventory.py" as inventory script: Inventory script (/opt/openstack-ansible/playbooks/inventory/dynamic_inventory.py) had an execution error:
Traceback (most recent call last):
  File "/opt/openstack-ansible/playbooks/inventory/dynamic_inventory.py", line 78, in <module>
    output = generate.main(**all_args)
  File "/opt/openstack-ansible/playbooks/inventory/../../lib/generate.py", line 1100, in main
    user_defined_config
  File "/opt/openstack-ansible/playbooks/inventory/../../lib/generate.py", line 730, in container_skel_load
    static_routes=p_net.get('static_routes')
  File "/opt/openstack-ansible/playbooks/inventory/../../lib/generate.py", line 572, in _add_additional_networks
    static_routes
  File "/opt/openstack-ansible/playbooks/inventory/../../lib/generate.py", line 638, in _add_additional_networks
    phg = user_config[cphg][container_host]
KeyError: u'c16'
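The failing line, phg = user_config[cphg][container_host] in _add_additional_networks, looks a container's physical host up in the user config, and that host is 'c16'. The openstack_user_config.yml below never defines c16 (only c11-c14, c23 and c24), so the likeliest cause is a stale record in the previously generated inventory, /etc/openstack_deploy/openstack_inventory.json by default, left behind when c16 was renamed or removed from the config. A minimal diagnostic sketch, assuming the default openstack-ansible paths and the standard _meta/hostvars inventory layout with a physical_host variable (PyYAML required); this script is not part of openstack-ansible:

#!/usr/bin/env python
# Report physical hosts that the saved inventory still references but that
# no longer appear in any *_hosts stanza of openstack_user_config.yml.
# Both paths are the openstack-ansible defaults; adjust if yours differ.
import json
import yaml

INVENTORY = '/etc/openstack_deploy/openstack_inventory.json'
USER_CONFIG = '/etc/openstack_deploy/openstack_user_config.yml'

with open(INVENTORY) as f:
    inventory = json.load(f)
with open(USER_CONFIG) as f:
    user_config = yaml.safe_load(f)

# Physical hosts named in any *_hosts stanza of the user config.
config_hosts = set()
for key, value in user_config.items():
    if key.endswith('_hosts') and isinstance(value, dict):
        config_hosts.update(value)

# Physical hosts the generated inventory still tracks via hostvars
# (assumes each container's hostvars carry a physical_host entry).
inventory_hosts = set()
for hostvars in inventory.get('_meta', {}).get('hostvars', {}).values():
    if hostvars.get('physical_host'):
        inventory_hosts.add(hostvars['physical_host'])

print('stale physical hosts: %s' % sorted(inventory_hosts - config_hosts))

If c16 is reported as stale, either restore a c16 entry to openstack_user_config.yml or prune it from the saved inventory before re-running the playbook; openstack-ansible ships scripts/inventory-manage.py for inventory surgery (a host-removal option, -r in the releases I have seen, but confirm with --help on your checkout).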
---
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  internal_lb_vip_address: cloud101int.stack31.com
  external_lb_vip_address: cloud101.stack31.com
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "ens2"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# magnum
magnum-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# rsyslog server
log_hosts:
  c14:
    ip: 172.29.236.14

###
### OpenStack
###

# keystone
identity_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  c11:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c12:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c13:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# neutron server, agents (L3, etc)
network_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# ceilometer (telemetry API)
metering-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# aodh (telemetry alarm service)
metering-alarm_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# gnocchi (telemetry metrics storage)
metrics_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  c23:
    ip: 172.29.236.23
  c24:
    ip: 172.29.236.24

# ceilometer compute agent (telemetry)
metering-compute_hosts:
  c23:
    ip: 172.29.236.23
  c24:
    ip: 172.29.236.24

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  c11:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
  c12:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
  c13:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
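Once the config and the saved inventory agree, the inventory script can be re-run by hand, the same way Ansible invokes it above, to confirm the traceback is gone before retrying the playbook:

$ python /opt/openstack-ansible/playbooks/inventory/dynamic_inventory.py --list > /dev/null && echo inventory OK

The script reads /etc/openstack_deploy by default, so any remaining mismatch between openstack_user_config.yml and openstack_inventory.json will reproduce the KeyError above.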