@a1git
Created July 19, 2018 07:40
OSA + CEPH
ERROR:
TASK [ceph-osd : verify devices have been provided] ************************************************************************************************************
Thursday 19 July 2018 07:36:17 +0000 (0:00:00.071) 0:11:34.213 *********
fatal: [s3]: FAILED! => {"changed": false, "failed": true, "msg": "please provide devices to your osd scenario"}
fatal: [s2]: FAILED! => {"changed": false, "failed": true, "msg": "please provide devices to your osd scenario"}
fatal: [s1]: FAILED! => {"changed": false, "failed": true, "msg": "please provide devices to your osd scenario"}
PLAY RECAP *****************************************************************************************************************************************************
## /etc/openstack_deploy/host_vars/s1.yml
osd_scenario: non-collocated
journal_size: 10240
dedicated_devices:
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:3:0
## /etc/openstack_deploy/host_vars/s2.yml
osd_scenario: non-collocated
journal_size: 10240
dedicated_devices:
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:3:0
## /etc/openstack_deploy/host_vars/s3.yml
osd_scenario: non-collocated
journal_size: 10240
dedicated_devices:
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:3:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:4:0
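The failure above is raised by the ceph-osd role's device check: with osd_scenario: non-collocated it expects a devices list (the disks the OSDs are created on) in addition to dedicated_devices (the journal/DB disks), and the host_vars above only define the latter. A minimal sketch of what s1's host_vars would likely need, with placeholder by-path entries that have to be replaced with the actual data disks on that host:

## Hypothetical /etc/openstack_deploy/host_vars/s1.yml (placeholder device paths)
osd_scenario: non-collocated
journal_size: 10240
# OSD data disks -- placeholders, replace with the real by-path entries for this host
devices:
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:0:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0
# dedicated journal devices, one per data disk above
dedicated_devices:
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:2:0
  - /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:3:0

s2 and s3 would need the same treatment, with s3 presumably carrying one more entry in each list to match its third dedicated device.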
## /etc/openstack_deploy/inventory.ini
[all]
## /etc/openstack_deploy/user_variables.yml
---
haproxy_keepalived_external_vip_cidr: "10.10.0.10/32"
haproxy_keepalived_internal_vip_cidr: "172.29.236.10/22"
haproxy_keepalived_external_interface: eno1
haproxy_keepalived_internal_interface: br-mgmt
generate_fsid: false
fsid: 9f734d2b-2a0d-419a-86bc-8adba9893ead # Replace with your generated UUID
monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }}"
cluster_network: "{{ cidr_networks.storage }}"
osd_scenario: non-collocated
journal_size: 10240 # size in MB
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms
cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
neutron_plugin_base:
  - router
  - firewall
  - neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
  - vpnaas
  - metering
  - qos
haproxy_ssl_self_signed_regen: false
haproxy_user_ssl_cert: /opt/ssl/domain.pem
haproxy_user_ssl_key: /opt/ssl/domain.key
resolvconf_enabled: False
osd_objectstore: bluestore
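Note that osd_scenario and journal_size appear both here and in the per-host files above; keeping the disk lists in host_vars is what lets s3 carry an extra device. If every OSD host exposed an identical layout, the devices and dedicated_devices lists could arguably live in this file instead of in host_vars. A hypothetical global form, for illustration only (placeholder device names):

# Only valid when all ceph-osd hosts share an identical disk layout (placeholder paths)
devices:
  - /dev/sdb
  - /dev/sdc
dedicated_devices:
  - /dev/sdd
  - /dev/sde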
## /etc/openstack_deploy/openstack_user_config.yml
---
cidr_networks: &cidr_networks
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22
used_ips:
  - "172.29.236.1,172.29.236.200"
  - "172.29.240.1,172.29.240.200"
  - "172.29.244.1,172.29.244.200"
  - "172.29.248.1,172.29.248.200"
global_overrides:
  cidr_networks: *cidr_networks
  internal_lb_vip_address: cloud-int.domain.com
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: cloud.domain.com
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1000000:9999999"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "100:3000"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - ceph-osd
###
### Infrastructure
###
_infrastructure_hosts: &infrastructure_hosts
  c1:
    ip: 172.29.236.11
  c2:
    ip: 172.29.236.12
  c3:
    ip: 172.29.236.13
# nova hypervisors
compute_hosts: &compute_hosts
  h1:
    ip: 172.29.236.51
  h2:
    ip: 172.29.236.52
ceph-osd_hosts:
  s1:
    ip: 172.29.236.21
  s2:
    ip: 172.29.236.22
  s3:
    ip: 172.29.236.23
# galera, memcache, rabbitmq, utility
shared-infra_hosts: *infrastructure_hosts
# ceph-mon containers
ceph-mon_hosts: *infrastructure_hosts
# repository (apt cache, python packages, etc)
repo-infra_hosts: *infrastructure_hosts
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts: *infrastructure_hosts
# rsyslog server
log_hosts:
  log1:
    ip: 172.29.236.14
###
### OpenStack
###
# keystone
identity_hosts: *infrastructure_hosts
# cinder api services
storage-infra_hosts: *infrastructure_hosts
# cinder volume hosts (Ceph RBD-backed)
storage_hosts: *infrastructure_hosts
# glance
image_hosts: *infrastructure_hosts
# nova api, conductor, etc services
compute-infra_hosts: *infrastructure_hosts
# heat
orchestration_hosts: *infrastructure_hosts
# horizon
dashboard_hosts: *infrastructure_hosts
# neutron server, agents (L3, etc)
network_hosts: *infrastructure_hosts
# ceilometer (telemetry data collection)
metering-infra_hosts: *infrastructure_hosts
# aodh (telemetry alarm service)
metering-alarm_hosts: *infrastructure_hosts
# gnocchi (telemetry metrics storage)
metrics_hosts: *infrastructure_hosts
# ceilometer compute agent (telemetry data collection)
metering-compute_hosts: *compute_hosts