@a1git
Created March 3, 2017 14:18
$ openstack-ansible os-magnum-install.yml -vvvv
Variable files: "-e @/etc/openstack_deploy/user_secrets.yml -e @/etc/openstack_deploy/user_variables_default.yml -e @/etc/openstack_deploy/user_variables.yml "
No config file found; using defaults
statically included: /etc/ansible/roles/pip_install/tasks/configure.yml
statically included: /etc/ansible/roles/pip_install/tasks/install.yml
statically included: /etc/ansible/roles/pip_install/tasks/pre_install.yml
statically included: /etc/ansible/roles/galera_client/tasks/galera_client_install.yml
statically included: /etc/ansible/roles/galera_client/tasks/galera_client_post_install.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_pre_install.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_install.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_post_install.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_init.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_init_common.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_init_common.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_db_setup.yml
statically included: /etc/ansible/roles/os_magnum/tasks/magnum_service_setup.yml
statically included: /etc/ansible/roles/rsyslog_client/tasks/rsyslog_client_pre_install.yml
statically included: /etc/ansible/roles/rsyslog_client/tasks/rsyslog_client_install.yml
statically included: /etc/ansible/roles/rsyslog_client/tasks/rsyslog_client_post_install.yml
statically included: /opt/openstack-ansible/playbooks/common-tasks/os-lxc-container-setup.yml
statically included: /opt/openstack-ansible/playbooks/common-tasks/os-log-dir-setup.yml
Loading callback plugin default of type stdout, v2.0 from /opt/ansible-runtime/lib/python2.7/site-packages/ansible/plugins/callback/__init__.pyc
Loading callback plugin profile_tasks of type aggregate, v2.0 from /opt/ansible-runtime/lib/python2.7/site-packages/ansible/plugins/callback/__init__.pyc
PLAYBOOK: os-magnum-install.yml ************************************************
1 plays in os-magnum-install.yml
PLAY RECAP *********************************************************************
Friday 03 March 2017 14:15:31 +0000 (0:00:00.015) 0:00:00.015 **********
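The verbose run above loads the magnum role task files and then goes straight to PLAY RECAP without executing any tasks; one common cause of that is the play's target group resolving to no hosts. Because the openstack-ansible wrapper passes its arguments through to ansible-playbook, running openstack-ansible os-magnum-install.yml --list-hosts is a quick way to confirm whether the magnum group is empty. The configuration shared below (which appears to be /etc/openstack_deploy/openstack_user_config.yml) defines no magnum hosts. A minimal, hypothetical sketch of adding them, assuming the magnum-infra_hosts group name used by the conf.d examples shipped with openstack-ansible and reusing the three infra nodes defined below:

# /etc/openstack_deploy/conf.d/magnum.yml -- hypothetical sketch, not part of the original gist
magnum-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

What follows is the deployer configuration as shared.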
---
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  internal_lb_vip_address: cloud101int.stack31.com
  external_lb_vip_address: cloud101.stack31.com
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "ens2"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# rsyslog server
log_hosts:
  c14:
    ip: 172.29.236.14

###
### OpenStack
###

# keystone
identity_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  c11:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c12:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  c13:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/srv/glance"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# neutron server, agents (L3, etc)
network_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# ceilometer (telemetry API)
metering-infra_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# aodh (telemetry alarm service)
metering-alarm_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# gnocchi (telemetry metrics storage)
metrics_hosts:
  c11:
    ip: 172.29.236.11
  c12:
    ip: 172.29.236.12
  c13:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  c23:
    ip: 172.29.236.23
  c24:
    ip: 172.29.236.24

# ceilometer compute agent (telemetry)
metering-compute_hosts:
  c23:
    ip: 172.29.236.23
  c24:
    ip: 172.29.236.24

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  c11:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
  c12:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
  c13:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: VOLUME_SERVER
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/srv/cinder"
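As the comments in the image_hosts and storage_hosts sections note, the glance NFS client and cinder backend settings are repeated on every infra node and could instead be set once globally. A minimal sketch of that alternative, assuming the same variable names are honored when placed in /etc/openstack_deploy/user_variables.yml rather than under per-host container_vars:

# /etc/openstack_deploy/user_variables.yml -- hypothetical global variant of the per-host settings above
glance_nfs_client:
  - server: "172.29.244.15"
    remote_path: "/srv/glance"
    local_path: "/var/lib/glance/images"
    type: "nfs"
    options: "_netdev,auto"

cinder_backends:
  nfs_volume:
    volume_backend_name: VOLUME_SERVER
    volume_driver: cinder.volume.drivers.nfs.NfsDriver
    nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
    nfs_shares_config: /etc/cinder/nfs_shares
    shares:
      - ip: "172.29.244.15"
        share: "/srv/cinder"
# limit_container_types from the per-host form is an inventory-scoping hint and is omitted in this global sketch

The per-host form used in the config above remains useful when different nodes need different storage targets, which is exactly what its inline comments point out.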