Skip to content

Instantly share code, notes, and snippets.

@a1git
Created February 10, 2017 20:13
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save a1git/0e5bbbd72582578ee6aa3a5f54d0a496 to your computer and use it in GitHub Desktop.
Save a1git/0e5bbbd72582578ee6aa3a5f54d0a496 to your computer and use it in GitHub Desktop.
552 ? S 0:00 upstart-socket-bridge --daemon
ok: [c16] => (item=c16) [290/1944]
skipping: [c14] => (item=c16)
skipping: [c16_cinder_scheduler_container-4a0918b6] => (item=c16)
skipping: [c15_cinder_scheduler_container-5431dade] => (item=c16)
skipping: [c14_cinder_scheduler_container-3ecfd985] => (item=c16)
skipping: [c14] => (item=c15)
skipping: [c16_cinder_scheduler_container-4a0918b6] => (item=c15)
skipping: [c15_cinder_scheduler_container-5431dade] => (item=c15)
skipping: [c14_cinder_scheduler_container-3ecfd985] => (item=c15)
ok: [c16] => (item=c15)
skipping: [c14] => (item=c14)
skipping: [c16_cinder_scheduler_container-4a0918b6] => (item=c14)
skipping: [c15_cinder_scheduler_container-5431dade] => (item=c14)
skipping: [c14_cinder_scheduler_container-3ecfd985] => (item=c14)
ok: [c16] => (item=c14)
TASK [os_cinder : Add in cinder devices types] *********************************
fatal: [c16]: FAILED! => {"failed": true, "msg": "the field 'args' has an invalid value, which appears to include a variable that is undefined. The error was: 'dict object'
has no attribute 'volume_backend_name'\n\nThe error appears to have been in '/etc/ansible/roles/os_cinder/tasks/cinder_backends.yml': line 30, column 3, but may\nbe elsewher
e in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: Add in cinder devices types\n ^ here\n"}
TASK [os_cinder : Add extra cinder volume types] *******************************
TASK [os_cinder : include] *****************************************************
TASK [os_cinder : Discover lvm devices] ****************************************
skipping: [c16_cinder_api_container-3a478857]
skipping: [c15_cinder_api_container-419a98bf]
skipping: [c14_cinder_api_container-c264ed38]
skipping: [c15]
skipping: [c14]
skipping: [c16_cinder_scheduler_container-4a0918b6]
skipping: [c15_cinder_scheduler_container-5431dade]
skipping: [c14_cinder_scheduler_container-3ecfd985]
CONFIG
---
# Address pools the dynamic inventory draws container/tunnel/storage IPs from.
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

# Ranges reserved for physical hosts / infrastructure; never handed to containers.
used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"
# Load-balancer VIPs and the bridge/network layout shared by every host.
global_overrides:
  internal_lb_vip_address: cloud101int.stack31.com
  external_lb_vip_address: cloud101.stack31.com
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    # Management network: container addressing and SSH access.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    # VXLAN overlay for tenant traffic (VNI range 10001-99999).
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "10001:99999"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Flat provider network, bound to host NIC ens2.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "ens2"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    # VLAN provider network, VLAN IDs 100-2000.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "100:2000"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Storage network for glance/cinder/nova data traffic.
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# rsyslog server
log_hosts:
  c13:
    ip: 172.29.236.13
###
### OpenStack
###

# keystone
identity_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# cinder api services
storage-infra_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  c14:
    ip: 172.29.236.14
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.11"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c15:
    ip: 172.29.236.15
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.11"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c16:
    ip: 172.29.236.16
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.11"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# heat
orchestration_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# horizon
dashboard_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# neutron server, agents (L3, etc)
network_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# ceilometer (telemetry API)
metering-infra_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# aodh (telemetry alarm service)
metering-alarm_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# gnocchi (telemetry metrics storage)
metrics_hosts:
  c14:
    ip: 172.29.236.14
  c15:
    ip: 172.29.236.15
  c16:
    ip: 172.29.236.16

# nova hypervisors
compute_hosts:
  c24:
    ip: 172.29.236.24
  c25:
    ip: 172.29.236.25

# ceilometer compute agent (telemetry)
metering-compute_hosts:
  c24:
    ip: 172.29.236.24
  c25:
    ip: 172.29.236.25
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
#
# NOTE: every key under cinder_backends other than
# limit_container_types is treated as a backend definition by the
# os_cinder role and MUST define volume_backend_name and
# volume_driver. The earlier bare "cinder_nfs_client" entry was the
# cause of the playbook failure quoted above:
#   "'dict object' has no attribute 'volume_backend_name'"
storage_hosts:
  c14:
    ip: 172.29.236.14
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.11"
              share: "/srv/cinder"
  c15:
    ip: 172.29.236.15
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.11"
              share: "/srv/cinder"
  c16:
    ip: 172.29.236.16
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.11"
              share: "/srv/cinder"
@a1git
Copy link
Author

a1git commented Feb 10, 2017

storage_hosts:
c14:
ip: 172.29.236.14
container_vars:
cinder_backends:
#volume_backend_name: NFS
limit_container_types: cinder_volume
cinder_nfs_client:
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- ip: "172.29.244.11"
share: "/srv/cinder"

change to

storage_hosts:
c14:
ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
cinder_nfs_client:
nfs-volume1:
volume_backend_name: NFS_VOLUME1
volume_driver: cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- ip: "172.29.244.11"
share: "/srv/cinder"

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment