Skip to content

Instantly share code, notes, and snippets.

@intlabs
Created November 27, 2016 23:30
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save intlabs/f4ea0f5105ebc38f54d7610fe8205262 to your computer and use it in GitHub Desktop.
Save intlabs/f4ea0f5105ebc38f54d7610fe8205262 to your computer and use it in GitHub Desktop.
kolla-kube-rendered templates
# kolla-kubernetes resource create bootstrap ceph-bootstrap-initial-mon
apiVersion: batch/v1
kind: Job
metadata: {name: ceph-bootstrap-initial-mon, namespace: kolla}
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
STORAGE_INTERFACE=docker0; F=/var/lib/kolla-kubernetes/ceph-mon/storage_interface;
[ -f $F ] && STORAGE_INTERFACE=$(<$F) || true; IP=$(ip addr list docker0
| grep ''inet '' |cut -d'' '' -f6|cut -d/ -f1); CONF=/srv/pod-main-config/ceph.conf;
sed -i ''/^mon host.*/d;/^mon host/d;/^mon addr/d'' $CONF; sed -i ''/^\\[global\\]/a
mon host = ''$IP $CONF; sed -i ''/^\\[global\\]/a mon addr = ''$IP'':6789''
$CONF; sed -i ''/^\\[global\\]/a mon initial members = minikube'' $CONF;"
], "volumeMounts": [ { "name": "ceph-mon-config", "mountPath": "/srv/configmap"
}, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config" }, {
"name": "ceph-mon", "mountPath": "/var/lib/ceph/mon" } ] } ]'}
spec:
containers:
- command: [/bin/bash, -ec, 'export MON_IP=$(grep ''^mon host'' /var/lib/kolla/config_files/ceph.conf
| awk ''{print $4}'');
kolla_start
echo -n ''FETCH_CEPH_KEYS: ''; fetch_ceph_keys.py
']
env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-ceph-mon:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /var/lib/ceph, name: ceph-mon}
hostNetwork: true
hostPID: true
nodeSelector: {kubernetes.io/hostname: minikube}
restartPolicy: OnFailure
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: ceph-mon}
name: ceph-mon-config
- hostPath: {path: /var/lib/kolla/volumes/ceph-mon}
name: ceph-mon
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "ceph-bootstrap-initial-mon" created
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)../kolla-kubernetes/tools/setup-ceph-secrets.sh
Error from server: container "main" in pod "ceph-bootstrap-initial-mon-c9owx" is waiting to start: PodInitializing
failed to read keys.
(.venv)
(.venv)watch kubectl get jobs --namespace=kolla
(.venv)../kolla-kubernetes/tools/setup-ceph-secrets.sh
secret "ceph-monmap" created
secret "ceph-client-radosgw-keyring" created
secret "ceph-client-mon-keyring" created
secret "ceph-client-admin-keyring" created
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)kolla-kubernetes resource create pod ceph-mon
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: ceph, system: mon}
name: ceph-mon
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
STORAGE_INTERFACE=docker0; F=/var/lib/kolla-kubernetes/ceph-mon/storage_interface;
[ -f $F ] && STORAGE_INTERFACE=$(<$F); IP=$(ip addr list docker0 | grep
''inet '' |cut -d'' '' -f6|cut -d/ -f1); CONF=/srv/pod-main-config/ceph.conf;
found=0; [ $IP == ''172.17.0.1'' ] && found=1 || true; echo $found > /srv/pod-main-config/inlist;
sed -i ''/^mon host.*/d;/^mon host/d;/^mon addr/d'' $CONF; sed -i ''/^\\[global\\]/a
mon host = ''$IP $CONF; sed -i ''/^\\[global\\]/a mon addr = ''$IP'':6789''
$CONF; sed -i ''/^\\[global\\]/a mon initial members = minikube'' $CONF;
sed -i ''s/@MONID@/''$(hostname)''/;s/@MONADDR@/''$IP''/'' /srv/pod-main-config/config.json;"
], "volumeMounts": [ { "name": "ceph-mon-config", "mountPath": "/srv/configmap"
}, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config" }, {
"name": "ceph-mon", "mountPath": "/var/lib/ceph/mon" } ] } ]'}
labels: {component: ceph, system: mon}
spec:
containers:
- command: [/bin/bash, -ec, "if [ $(cat /var/lib/kolla/config_files/inlist)\
\ == \"0\" ]; then\n echo \"Not a mon... blocking.\";\n while true;\
\ do sleep 10000; done;\nfi;\nkolla_start\n"]
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-ceph-mon:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files/ceph.conf, name: pod-main-config,
readOnly: true, subPath: ceph.conf}
- {mountPath: /var/lib/kolla/config_files/config.json, name: pod-main-config,
readOnly: true, subPath: config.json}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /var/lib/ceph, name: ceph-mon}
- {mountPath: /var/lib/kolla/config_files/ceph.client.mon.keyring, name: ceph-client-mon-keyring,
subPath: data}
- {mountPath: /var/lib/kolla/config_files/ceph.client.admin.keyring, name: ceph-client-admin-keyring,
subPath: data}
- {mountPath: /var/lib/kolla/config_files/ceph.monmap, name: ceph-monmap,
subPath: data}
hostNetwork: true
hostPID: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: ceph-mon}
name: ceph-mon-config
- name: ceph-client-mon-keyring
secret: {secretName: ceph-client-mon-keyring}
- name: ceph-client-admin-keyring
secret: {secretName: ceph-client-admin-keyring}
- name: ceph-monmap
secret: {secretName: ceph-monmap}
- hostPath: {path: /var/lib/kolla/volumes/ceph-mon}
name: ceph-mon
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
daemonset "ceph-mon" created
(.venv)
(.venv)
(.venv)watch kubectl get pods --namespace=kolla
(.venv)
(.venv)
(.venv)kolla-kubernetes resource create pod ceph-bootstrap-osd
apiVersion: v1
kind: Pod
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command": [
"/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/; STORAGE_INTERFACE=docker0;
F=/var/lib/kolla-kubernetes/ceph-osd/storage_interface; [ -f $F ] && STORAGE_INTERFACE=$(<$F)
|| true; IP=$(ip addr list docker0 | grep ''inet '' |cut -d'' '' -f6|cut -d/
-f1); CONF=/srv/pod-main-config/ceph.conf; sed -i ''/^mon host.*/d;/^mon host/d;/^mon
addr/d'' $CONF; sed -i ''/^\\[global\\]/a mon host = ''$IP $CONF; sed -i ''/^\\[global\\]/a
mon addr = ''$IP'':6789'' $CONF; sed -i ''/^\\[global\\]/a mon initial members
= minikube'' $CONF;" ], "volumeMounts": [ { "name": "ceph-osd-config", "mountPath":
"/srv/configmap" }, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config"
}, { "name": "ceph-osd", "mountPath": "/var/lib/ceph/osd" } ] } ]'}
name: ceph-bootstrap-osd
namespace: kolla
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- {name: USE_EXTERNAL_JOURNAL, value: 'True'}
- {name: JOURNAL_DEV, value: /dev/loop0}
- {name: JOURNAL_PARTITION_NUM, value: '1'}
- {name: JOURNAL_PARTITION, value: /dev/loop0p1}
- {name: OSD_DEV, value: /dev/loop0}
- {name: OSD_PARTITION_NUM, value: '2'}
- {name: OSD_PARTITION, value: /dev/loop0p2}
- {name: OSD_INITIAL_WEIGHT, value: '1'}
- {name: OSD_FILESYSTEM, value: xfs}
- {name: HOSTNAME, value: minikube}
image: kolla/centos-binary-ceph-osd:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files/ceph.conf, name: pod-main-config, readOnly: true,
subPath: ceph.conf}
- {mountPath: /var/lib/kolla/config_files/config.json, name: pod-main-config,
readOnly: true, subPath: config.json}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /var/lib/ceph, name: ceph-osd}
- {mountPath: /var/lib/kolla/config_files/ceph.client.admin.keyring, name: ceph-client-admin-keyring,
subPath: data}
- {mountPath: /dev, name: host-dev}
hostNetwork: true
hostPID: true
nodeSelector: {kubernetes.io/hostname: minikube}
restartPolicy: Never
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: ceph-osd}
name: ceph-osd-config
- hostPath: {path: /var/lib/kolla/volumes/ceph-osd}
name: ceph-osd
- hostPath: {path: /dev}
name: host-dev
- name: ceph-client-admin-keyring
secret: {secretName: ceph-client-admin-keyring}
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
pod "ceph-bootstrap-osd" created
(.venv)watch kubectl get pods ceph-bootstrap-osd --show-all --namespace=kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)kolla-kubernetes resource delete pod ceph-bootstrap-osd
pod "ceph-bootstrap-osd" deleted
(.venv)kolla-kubernetes resource create pod ceph-osd
apiVersion: v1
kind: Pod
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command": [
"/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/; STORAGE_INTERFACE=docker0;
F=/var/lib/kolla-kubernetes/ceph-osd/storage_interface; [ -f $F ] && STORAGE_INTERFACE=$(<$F)
|| true; IP=$(ip addr list docker0 | grep ''inet '' |cut -d'' '' -f6|cut -d/
-f1); CONF=/srv/pod-main-config/ceph.conf; sed -i ''/^mon host.*/d;/^mon host/d;/^mon
addr/d'' $CONF; sed -i ''/^\\[global\\]/a mon host = ''$IP $CONF; sed -i ''/^\\[global\\]/a
mon addr = ''$IP'':6789'' $CONF; sed -i ''/^\\[global\\]/a mon initial members
= minikube'' $CONF; sed -i ''s/@HOSTADDR@/''$IP''/g;s/@CLUSTERADDR@/''$IP''/g''
/srv/pod-main-config/config.json;" ], "volumeMounts": [ { "name": "ceph-osd-config",
"mountPath": "/srv/configmap" }, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config"
}, { "name": "ceph-osd", "mountPath": "/var/lib/ceph/osd" } ] } ]'}
name: ceph-osd
namespace: kolla
spec:
containers:
- command: [/bin/bash, -ec, 'mount /dev/loop0p2 /var/lib/ceph/osd/ceph-0
kolla_start
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- {name: OSD_ID, value: '0'}
- {name: JOURNAL_PARTITION, value: /dev/loop0p1}
- {name: HOSTNAME, value: minikube}
image: kolla/centos-binary-ceph-osd:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files/ceph.conf, name: pod-main-config, readOnly: true,
subPath: ceph.conf}
- {mountPath: /var/lib/kolla/config_files/config.json, name: pod-main-config,
readOnly: true, subPath: config.json}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /var/lib/ceph, name: ceph-osd}
- {mountPath: /var/lib/kolla/config_files/ceph.client.admin.keyring, name: ceph-client-admin-keyring,
subPath: data}
- {mountPath: /dev, name: host-dev}
hostNetwork: true
hostPID: true
nodeSelector: {kubernetes.io/hostname: minikube}
restartPolicy: Never
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: ceph-osd}
name: ceph-osd-config
- hostPath: {path: /var/lib/kolla/volumes/ceph-osd}
name: ceph-osd
- hostPath: {path: /dev}
name: host-dev
- name: ceph-client-admin-keyring
secret: {secretName: ceph-client-admin-keyring}
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
pod "ceph-osd" created
(.venv)watch kubectl get pods ceph-osd --namespace=kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in images volumes vms; do
> kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash \
> -c "ceph osd pool create $x 64"
> done
pool 'images' created
pool 'volumes' created
pool 'vms' created
(.venv)str="ceph auth get-or-create client.glance mon 'allow r' osd 'allow"
(.venv)str="$str class-read object_prefix rbd_children, allow rwx pool=images'"
(.venv)kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
> "$str" > /tmp/$$
a\venv)kubectl create secret generic ceph-client-glance-keyring --namespace=koll
> --from-file=ceph.client.glance.keyring=/tmp/$$
secret "ceph-client-glance-keyring" created
(.venv)str="$str class-read object_prefix rbd_children, allow rwx pool=volumes'"
(.venv)kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \olumes'"
> "$str" > /tmp/$$
a\venv)kubectl create secret generic ceph-client-cinder-keyring --namespace=koll
> --from-file=ceph.client.cinder.keyring=/tmp/$$
secret "ceph-client-cinder-keyring" created
(.venv)str="ceph auth get-or-create client.nova mon 'allow r' osd 'allow "
".venv)str="$str class-read object_prefix rbd_children, allow rwx pool=volumes,
(.venv)str="$str allow rwx pool=vms, allow rwx pool=images'"
(.venv)kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
> "$str" > /tmp/$$
\.venv)kubectl create secret generic ceph-client-nova-keyring --namespace=kolla
> --from-file=ceph.client.nova.keyring=/tmp/$$
secret "ceph-client-nova-keyring" created
(.venv)kubectl create secret generic nova-libvirt-bin --namespace=kolla \
> --from-file=data=<(awk '{if($1 == "key"){print $3}}' /tmp/$$ |
> tr -d '\n')
secret "nova-libvirt-bin" created
(.venv)kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
> "cat /etc/ceph/ceph.conf" > /tmp/$$
(.venv)kubectl create configmap ceph-conf --namespace=kolla \
> --from-file=ceph.conf=/tmp/$$
configmap "ceph-conf" created
(.venv)rm -f /tmp/$$
(.venv)kolla-kubernetes resource create secret nova-libvirt
apiVersion: v1
data: {data: PHNlY3JldCBlcGhlbWVyYWw9J25vJyBwcml2YXRlPSdubyc+CiAgPHV1aWQ+ZjY4ZmVjMWItMTgzMS00ZmQ4LTk5ZDEtNTQ4N2M0NjA5YzViPC91dWlkPgogIDx1c2FnZSB0eXBlPSdjZXBoJz4KICAgIDxuYW1lPmNsaWVudC5ub3ZhIHNlY3JldDwvbmFtZT4KICA8L3VzYWdlPgo8L3NlY3JldD4=}
kind: Secret
metadata: {name: nova-libvirt, namespace: kolla}
type: Opaque
secret "nova-libvirt" created
(.venv)
(.venv)
for x in mariadb rabbitmq glance; do
> kolla-kubernetes resource create pv $x
> kolla-kubernetes resource create pvc $x
> done
apiVersion: v1
kind: PersistentVolume
metadata: {name: mariadb}
spec:
accessModes: [ReadWriteOnce]
capacity: {storage: 10Gi}
hostPath: {path: /var/lib/kolla/volumes/mariadb}
persistentvolume "mariadb" created
apiVersion: v1
kind: PersistentVolumeClaim
metadata: {name: mariadb, namespace: kolla}
spec:
accessModes: [ReadWriteOnce]
resources:
requests: {storage: 10}
persistentvolumeclaim "mariadb" created
apiVersion: v1
kind: PersistentVolume
metadata: {name: rabbitmq}
spec:
accessModes: [ReadWriteOnce]
capacity: {storage: 10Gi}
hostPath: {path: /var/lib/kolla/volumes/rabbitmq}
persistentvolume "rabbitmq" created
apiVersion: v1
kind: PersistentVolumeClaim
metadata: {name: rabbitmq, namespace: kolla}
spec:
accessModes: [ReadWriteOnce]
resources:
requests: {storage: 10}
persistentvolumeclaim "rabbitmq" created
apiVersion: v1
kind: PersistentVolume
metadata: {name: glance}
spec:
accessModes: [ReadWriteOnce]
capacity: {storage: 10Gi}
hostPath: {path: /var/lib/kolla/volumes/glance}
persistentvolume "glance" created
apiVersion: v1
kind: PersistentVolumeClaim
metadata: {name: glance, namespace: kolla}
spec:
accessModes: [ReadWriteOnce]
resources:
requests: {storage: 10}
persistentvolumeclaim "glance" created
(.venv)for x in mariadb memcached keystone-admin keystone-public rabbitmq \
> rabbitmq-management nova-api glance-api glance-registry \
> neutron-server nova-metadata nova-novncproxy horizon \
> cinder-api; \
> do
> kolla-kubernetes resource create svc $x
> done
apiVersion: v1
kind: Service
metadata: {name: mariadb, namespace: kolla}
spec:
ports:
- {name: mariadb, port: 3306}
selector: {service: mariadb}
service "mariadb" created
apiVersion: v1
kind: Service
metadata: {name: memcached, namespace: kolla}
spec:
ports:
- {name: memcached, port: 11211}
selector: {service: memcached}
service "memcached" created
apiVersion: v1
kind: Service
metadata: {name: keystone-admin, namespace: kolla}
spec:
ports:
- {name: keystone-admin, port: 35357}
selector: {service: keystone, type: api}
service "keystone-admin" created
apiVersion: v1
kind: Service
metadata: {name: keystone-public, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: keystone-public, port: 5000, targetPort: 5000}
selector: {service: keystone, type: api}
service "keystone-public" created
apiVersion: v1
kind: Service
metadata: {name: rabbitmq, namespace: kolla}
spec:
ports:
- {name: rabbitmq, port: 5672}
selector: {service: rabbitmq}
service "rabbitmq" created
apiVersion: v1
kind: Service
metadata: {name: rabbitmq-management, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: management, port: 15672}
selector: {service: rabbitmq}
service "rabbitmq-management" created
apiVersion: v1
kind: Service
metadata: {name: nova-api, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: nova-api, port: 8774, targetPort: 8774}
selector: {service: nova, type: api}
service "nova-api" created
apiVersion: v1
kind: Service
metadata: {name: glance-api, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: glance-api, port: 9292, targetPort: 9292}
selector: {service: glance, type: api}
service "glance-api" created
apiVersion: v1
kind: Service
metadata: {name: glance-registry, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: glance-registry, port: 9191, targetPort: 9191}
selector: {service: glance, type: registry}
service "glance-registry" created
apiVersion: v1
kind: Service
metadata: {name: neutron-server, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: neutron-server, port: 9696, targetPort: 9696}
selector: {service: neutron, type: server}
service "neutron-server" created
apiVersion: v1
kind: Service
metadata: {name: nova-metadata, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: nova-metadata, port: 8775, targetPort: 8775}
selector: {service: nova, type: api}
service "nova-metadata" created
apiVersion: v1
kind: Service
metadata: {name: nova-novncproxy, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: nova-novncproxy, port: 6080, targetPort: 6080}
selector: {service: nova, type: novncproxy}
service "nova-novncproxy" created
apiVersion: v1
kind: Service
metadata: {name: horizon, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: http, port: 80, targetPort: 80}
- {name: https, port: 443, targetPort: 443}
selector: {service: horizon}
service "horizon" created
apiVersion: v1
kind: Service
metadata: {name: cinder-api, namespace: kolla}
spec:
externalIPs: [192.168.42.160]
ports:
- {name: cinder-api, port: 8776, targetPort: 8776}
selector: {service: cinder, type: api}
service "cinder-api" created
(.venv)
(.venv)for x in mariadb-bootstrap rabbitmq-bootstrap; do
> kolla-kubernetes resource create bootstrap $x
> done
apiVersion: batch/v1
kind: Job
metadata: {name: mariadb-bootstrap, namespace: kolla}
spec:
template:
spec:
containers:
- env:
- {name: KOLLA_KUBERNETES, value: ''}
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- {name: DB_ROOT_PASSWORD, value: K5zyKDRLG8HvT65aZl1VKsLC9VX6Gv88sIJjMYYz}
image: kolla/centos-binary-mariadb:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: mariadb-config}
- {mountPath: /var/lib/mysql, name: mariadb-persistent-storage}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: mariadb}
name: mariadb-config
- name: mariadb-persistent-storage
persistentVolumeClaim: {claimName: mariadb}
- emptyDir: {}
name: kolla-logs
job "mariadb-bootstrap" created
apiVersion: batch/v1
kind: Job
metadata: {name: rabbitmq-bootstrap, namespace: kolla}
spec:
template:
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- {name: RABBITMQ_CLUSTER_COOKIE, value: w8A2DDpjSKT8yQiUeElFsoWCnMgLl7PL5bGLRakh}
image: kolla/centos-binary-rabbitmq:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: rabbitmq-config}
- {mountPath: /var/lib/rabbitmq, name: rabbitmq-persistent-storage}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: rabbitmq}
name: rabbitmq-config
- name: rabbitmq-persistent-storage
persistentVolumeClaim: {claimName: rabbitmq}
- emptyDir: {}
name: kolla-logs
job "rabbitmq-bootstrap" created
(.venv)
(.venv)
(.venv)
(.venv)for x in mariadb-bootstrap rabbitmq-bootstrap; do
> kolla-kubernetes resource delete bootstrap $x
> done
job "mariadb-bootstrap" deleted
job "rabbitmq-bootstrap" deleted
(.venv)for x in mariadb memcached rabbitmq; do
> kolla-kubernetes resource create pod $x
> done
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: mariadb, namespace: kolla}
spec:
replicas: 1
serviceName: mariadb
template:
metadata:
annotations: {pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: mariadb}
spec:
containers:
- env:
- {name: KOLLA_KUBERNETES, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
image: kolla/centos-binary-mariadb:2.0.2
lifecycle:
preStop:
exec:
command: [mysqladmin -u root -p$DATABASE_PASSWORD shutdown]
name: main
ports:
- {containerPort: 3306, name: mariadb}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: mariadb-config}
- {mountPath: /var/lib/mysql, name: mariadb-persistent-storage}
- {mountPath: /var/log/kolla/, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 120
volumes:
- configMap: {name: mariadb}
name: mariadb-config
- name: mariadb-persistent-storage
persistentVolumeClaim: {claimName: mariadb}
- emptyDir: {}
name: kolla-logs
petset "mariadb" created
apiVersion: v1
kind: ReplicationController
metadata: {name: memcached, namespace: kolla}
spec:
replicas: 1
selector: {service: memcached}
template:
metadata:
labels: {service: memcached}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-memcached:2.0.2
name: main
ports:
- {containerPort: 11211, name: memcached}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: memcached-config}
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: memcached}
name: memcached-config
replicationcontroller "memcached" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: rabbitmq, namespace: kolla}
spec:
replicas: 1
serviceName: rabbitmq
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
sed -i ''/^export ERL_EPMD_ADDRESS=0.0.0.0$/d'' /srv/pod-main-config/rabbitmq-env.conf;"
], "volumeMounts": [ { "name": "rabbitmq-config", "mountPath": "/srv/configmap"
}, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config" } ]
} ]', pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: rabbitmq}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-rabbitmq:2.0.2
name: main
ports:
- {containerPort: 5672, name: rabbitmq}
- {containerPort: 15672, name: management}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/lib/rabbitmq, name: rabbitmq-persistent-storage}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: rabbitmq}
name: rabbitmq-config
- name: rabbitmq-persistent-storage
persistentVolumeClaim: {claimName: rabbitmq}
- emptyDir: {}
name: kolla-logs
petset "rabbitmq" created
(.venv)watch kubectl get pods --namespace kolla
(.venv)^C
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)watch kubectl get pods --namespace kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in keystone-create-db keystone-endpoints keystone-manage-db; do
> kolla-kubernetes resource create bootstrap $x
> done
apiVersion: batch/v1
kind: Job
metadata: {name: keystone-create-db, namespace: kolla}
spec:
template:
spec:
containers:
- args: [ansible localhost -m mysql_db -a "login_host='mariadb' login_port='3306'
login_user='root' login_password='$DATABASE_PASSWORD' name='keystone'"]
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: main
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m mysql_user -a "login_host=''mariadb'' login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''keystone''
password=''$KEYSTONE_DATABASE_PASSWORD'' host=''%'' priv=''keystone.*:ALL''
append_privs=''yes''"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
- name: KEYSTONE_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-keystone-user-and-permissions
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "keystone-create-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: keystone-endpoints, namespace: kolla}
spec:
template:
spec:
containers:
- command: [sh, -c, 'sudo -E kolla_set_configs; kolla_keystone_bootstrap admin
$KEYSTONE_ADMIN_PASSWORD admin admin http://keystone-admin:35357/v3 http://keystone-internal:5000/v3
http://192.168.42.160:5000/v3 RegionOne']
env:
- {name: KOLLA_KUBERNETES, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kolla/centos-binary-keystone:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: keystone-config, readOnly: true}
- {mountPath: /var/log/kolla/keystone, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: keystone}
name: keystone-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "keystone-endpoints" created
apiVersion: batch/v1
kind: Job
metadata: {name: keystone-manage-db, namespace: kolla}
spec:
template:
spec:
containers:
- env:
- {name: KOLLA_KUBERNETES, value: ''}
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-keystone:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: keystone-config, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: keystone}
name: keystone-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "keystone-manage-db" created
(.venv)watch kubectl get jobs --namespace kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in keystone-create-db keystone-endpoints keystone-manage-db; do
> kolla-kubernetes resource delete bootstrap $x
> done
job "keystone-create-db" deleted
job "keystone-endpoints" deleted
job "keystone-manage-db" deleted
(.venv)kolla-kubernetes resource create pod keystone
apiVersion: v1
kind: ReplicationController
metadata: {name: keystone, namespace: kolla}
spec:
replicas: 1
selector: {service: keystone}
template:
metadata:
labels: {service: keystone, type: api}
spec:
containers:
- env:
- {name: KOLLA_KUBERNETES, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-keystone:2.0.2
name: main
ports:
- {containerPort: 35357, name: keystone-admin}
- {containerPort: 5000, name: keystone-public}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: keystone-config}
- {mountPath: /var/log/kolla/keystone, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: keystone}
name: keystone-config
- emptyDir: {}
name: kolla-logs
replicationcontroller "keystone" created
(.venv)watch kubectl get pods --namespace=kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in glance-create-db glance-endpoints glance-manage-db \
> nova-create-api-db nova-create-endpoints nova-create-db \
> neutron-create-db neutron-endpoints neutron-manage-db \
> cinder-create-db cinder-create-endpoints cinder-manage-db; \
> do
> kolla-kubernetes resource create bootstrap $x
> done
apiVersion: batch/v1
kind: Job
metadata: {name: glance-create-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: [ansible localhost -m mysql_db -a "login_host=mariadb login_port='3306'
login_user='root' login_password='$DATABASE_PASSWORD' name='glance'"]
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-database
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m mysql_user -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''glance''
password=''$GLANCE_DATABASE_PASSWORD'' host=''%'' priv=''glance.*:ALL''
append_privs=''yes''"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
- name: GLANCE_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: glance-database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-user-and-permissions
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- emptyDir: {}
name: kolla-logs
- hostPath: {path: /etc/localtime}
name: etc-localtime
job "glance-create-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: glance-endpoints, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=glance
service_type=image description=''Openstack Image'' endpoint_region=RegionOne
url=''http://glance-api:9292'' interface=admin region_name=RegionOne auth={{
openstack_glance_auth }}" "-e" "{''openstack_glance_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-service-and-endpoint-admin
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=glance
service_type=image description=''Openstack Image'' endpoint_region=RegionOne
url=''http://glance-api:9292'' interface=internal region_name=RegionOne
auth={{ openstack_glance_auth }}" "-e" "{''openstack_glance_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-service-and-endpoint-internal
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=glance
service_type=image description=''Openstack Image'' endpoint_region=RegionOne
url=''http://192.168.42.160:9292'' interface=public region_name=RegionOne
auth={{ openstack_glance_auth }}" "-e" "{''openstack_glance_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-service-and-endpoint-public
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m kolla_keystone_user -a "project=service user=glance
password=3W1ctxRUXGu965JHuk7Uyz2KUCzi2zb3cJ6dMMkc role=admin region_name=RegionOne
auth={{ openstack_glance_auth }}" "-e" "{''openstack_glance_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-glance-user-project-role
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- emptyDir: {}
name: kolla-logs
- hostPath: {path: /etc/localtime}
name: etc-localtime
job "glance-endpoints" created
apiVersion: batch/v1
kind: Job
metadata: {name: glance-manage-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-glance-api:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files/config.json, name: glance-api-config,
readOnly: true, subPath: config.json}
- {mountPath: /var/lib/kolla/config_files/glance-api.conf, name: glance-api-config,
readOnly: true, subPath: glance-api.conf}
- {mountPath: /var/lib/kolla/config_files/ceph.conf, name: ceph-conf, subPath: ceph.conf}
- {mountPath: /var/lib/kolla/config_files/ceph.client.glance.keyring, name: ceph-conf,
subPath: ceph.conf}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: glance-api}
name: glance-api-config
- configMap: {name: ceph-conf}
name: ceph-conf
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "glance-manage-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: nova-create-api-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-api:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-api-config}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /lib/modules, name: lib-modules}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: nova-api}
name: nova-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- emptyDir: {}
name: kolla-logs
job "nova-create-api-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: nova-create-endpoints, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: ['/usr/bin/ansible localhost -vvvv -m kolla_keystone_service -a "service_name=nova
service_type=compute description=''Openstack Compute'' endpoint_region=RegionOne
url=''http://nova-api:8774/v2/%(tenant_id)s'' interface=''admin'' region_name=RegionOne
auth={{ openstack_nova_auth }}" -e "{''openstack_nova_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -vvvv -m kolla_keystone_service -a "service_name=nova
service_type=compute description=''Openstack Compute'' endpoint_region=RegionOne
url=''http://nova-api:8774/v2/%(tenant_id)s'' interface=''internal'' region_name=RegionOne
auth={{ openstack_nova_auth }}" -e "{''openstack_nova_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -vvvv -m kolla_keystone_service -a "service_name=nova
service_type=compute description=''Openstack Compute'' endpoint_region=RegionOne
url=''http://192.168.42.160:8774/v2/%(tenant_id)s'' interface=''public''
region_name=RegionOne auth={{ openstack_nova_auth }}" -e "{''openstack_nova_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -vvvv -m kolla_keystone_user -a "project=service
user=nova password=$NOVA_KEYSTONE_PASSWORD role=admin region_name=RegionOne
auth={{ openstack_nova_auth }}" -e "{''openstack_nova_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
- name: NOVA_KEYSTONE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: nova-keystone-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: main
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: nova-api}
name: nova-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- emptyDir: {}
name: kolla-logs
job "nova-create-endpoints" created
apiVersion: batch/v1
kind: Job
metadata: {name: nova-create-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: ['ansible localhost -m mysql_db -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''nova''"
&& ansible localhost -m mysql_db -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''nova_api''"
&& ansible localhost -m mysql_user -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''nova''
password=''$NOVA_DATABASE_PASSWORD'' host=''%'' priv=''nova.*:ALL'' append_privs=''yes''"
&& ansible localhost -m mysql_user -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''nova_api''
password=''$NOVA_API_DATABASE_PASSWORD'' host=''%'' priv=''nova_api.*:ALL''
append_privs=''yes''"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
- name: NOVA_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: nova-database-password}
- name: NOVA_API_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: nova-api-database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: main
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: nova-api}
name: nova-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- emptyDir: {}
name: kolla-logs
job "nova-create-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: neutron-create-db, namespace: kolla}
spec:
template:
spec:
containers:
- args: [ansible localhost -m mysql_db -a "login_host='mariadb' login_port='3306'
login_user='root' login_password='$DATABASE_PASSWORD' name='neutron'"]
command: [bash, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: main
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- args: ['ansible localhost -m mysql_user -a "login_host=''mariadb'' login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''neutron''
password=''$NEUTRON_DATABASE_PASSWORD'' host=''%'' priv=''neutron.*:ALL''
append_privs=''yes''"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
- name: NEUTRON_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: neutron-database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-user-permissions
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "neutron-create-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: neutron-endpoints, namespace: kolla}
spec:
template:
spec:
containers:
- args: ['/usr/bin/ansible localhost -m kolla_keystone_service -a "service_name=neutron
service_type=network description=''Openstack Networking'' endpoint_region=RegionOne
url=''http://neutron-server:9696'' interface=''admin'' region_name=RegionOne
auth={{ openstack_neutron_auth }}" -e "{''openstack_neutron_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -m kolla_keystone_service -a "service_name=neutron
service_type=network description=''Openstack Networking'' endpoint_region=RegionOne
url=''http://neutron-server:9696'' interface=''internal'' region_name=RegionOne
auth={{ openstack_neutron_auth }}" -e "{''openstack_neutron_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -m kolla_keystone_service -a "service_name=neutron
service_type=network description=''Openstack Networking'' endpoint_region=RegionOne
url=''http://192.168.42.160:9696'' interface=''public'' region_name=RegionOne
auth={{ openstack_neutron_auth }}" -e "{''openstack_neutron_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"
&& /usr/bin/ansible localhost -m kolla_keystone_user -a "project=service
user=neutron password=$NEUTRON_KEYSTONE_PASSWORD role=admin region_name=RegionOne
auth={{ openstack_neutron_auth }}" -e "{''openstack_neutron_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: NEUTRON_KEYSTONE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: neutron-keystone-password}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: main
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "neutron-endpoints" created
apiVersion: batch/v1
kind: Job
metadata: {name: neutron-manage-db, namespace: kolla}
spec:
template:
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-server:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: neutron-server-config, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
- {key: neutron-lbaas.conf, path: neutron_lbaas.conf}
- {key: neutron-vpnaas.conf, path: neutron_vpnaas.conf}
name: neutron-server
name: neutron-server-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "neutron-manage-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: cinder-create-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: [ansible localhost -m mysql_db -a "login_host=mariadb login_port='3306'
login_user='root' login_password='$DATABASE_PASSWORD' name='cinder'"]
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-database
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m mysql_user -a "login_host=mariadb login_port=''3306''
login_user=''root'' login_password=''$DATABASE_PASSWORD'' name=''cinder''
password=''$CINDER_DATABASE_PASSWORD'' host=''%'' priv=''cinder.*:ALL''
append_privs=''yes''"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: database-password}
- name: CINDER_DATABASE_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: cinder-database-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-user-and-permissions
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: cinder-api}
name: cinder-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "cinder-create-db" created
apiVersion: batch/v1
kind: Job
metadata: {name: cinder-create-endpoints, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinder
service_type=volume description=''Openstack Image'' endpoint_region=RegionOne
url=''http://cinder-api:8776/v2/%(tenant_id)s'' interface=admin region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-service-and-endpoint-admin
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinder
service_type=volume description=''Openstack Image'' endpoint_region=RegionOne
url=''http://cinder-api:8776/v2/%(tenant_id)s'' interface=internal region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-service-and-endpoint-internal
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinder
service_type=volume description=''Openstack Image'' endpoint_region=RegionOne
url=''http://192.168.42.160:8776/v2/%(tenant_id)s'' interface=public region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-service-and-endpoint-public
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinderv2
service_type=volumev2 description=''Openstack Image'' endpoint_region=RegionOne
url=''http://cinder-api:8776/v2/%(tenant_id)s'' interface=admin region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinderv2-service-and-endpoint-admin
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinderv2
service_type=volumev2 description=''Openstack Image'' endpoint_region=RegionOne
url=''http://cinder-api:8776/v2/%(tenant_id)s'' interface=internal region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinderv2-service-and-endpoint-internal
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_service -a "service_name=cinderv2
service_type=volumev2 description=''Openstack Image'' endpoint_region=RegionOne
url=''http://192.168.42.160:8776/v2/%(tenant_id)s'' interface=public region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinderv2-service-and-endpoint-public
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
- args: ['ansible localhost -m kolla_keystone_user -a "project=service user=cinder
password=8rojnYV9r000nTH8TCY9CMghu2m2YtYiyw8XBx75 role=admin region_name=RegionOne
auth={{ openstack_cinder_auth }}" "-e" "{''openstack_cinder_auth'':{''auth_url'':''http://keystone-admin:35357'',''username'':''admin'',''password'':''$KEYSTONE_ADMIN_PASSWORD'',''project_name'':''admin'',''domain_name'':''default''}}"']
command: [sh, -c]
env:
- {name: ANSIBLE_NOCOLOR, value: '1'}
- {name: ANSIBLE_LIBRARY, value: /usr/share/ansible}
- name: KEYSTONE_ADMIN_PASSWORD
valueFrom:
secretKeyRef: {key: password, name: keystone-admin-password}
image: kfox1111/centos-binary-kolla-toolbox:trunk-sometime
name: creating-cinder-user-project-role
volumeMounts:
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: cinder-api}
name: cinder-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "cinder-create-endpoints" created
apiVersion: batch/v1
kind: Job
metadata: {name: cinder-manage-db, namespace: kolla}
spec:
completions: 1
parallelism: 1
template:
spec:
containers:
- env:
- {name: KOLLA_BOOTSTRAP, value: ''}
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-cinder-api:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: cinder-api-config, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
volumes:
- configMap: {name: cinder-api}
name: cinder-api-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
job "cinder-manage-db" created
(.venv)watch kubectl get jobs --namespace=kolla
(.venv)]
bash: ]: command not found
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in glance-create-db glance-endpoints glance-manage-db \
> nova-create-api-db nova-create-endpoints nova-create-db \
> neutron-create-db neutron-endpoints neutron-manage-db \
> cinder-create-db cinder-create-endpoints cinder-manage-db; \
> do
> kolla-kubernetes resource delete bootstrap $x
> done
job "glance-create-db" deleted
job "glance-endpoints" deleted
job "glance-manage-db" deleted
job "nova-create-api-db" deleted
job "nova-create-endpoints" deleted
job "nova-create-db" deleted
job "neutron-create-db" deleted
job "neutron-endpoints" deleted
job "neutron-manage-db" deleted
job "cinder-create-db" deleted
job "cinder-create-endpoints" deleted
job "cinder-manage-db" deleted
(.venv)for x in nova-api nova-conductor nova-scheduler glance-api \
> glance-registry neutron-server horizon nova-consoleauth \
> nova-novncproxy cinder-api cinder-scheduler \
> cinder-volume-ceph; \
> do
> kolla-kubernetes resource create pod $x
> done
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: nova-api, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
CONF=/srv/pod-main-config/nova.conf; crudini --set $CONF DEFAULT metadata_listen
127.0.0.1; crudini --set $CONF DEFAULT metadata_listen_port 8081; crudini
--set $CONF DEFAULT osapi_compute_listen 127.0.0.1; crudini --set $CONF
DEFAULT osapi_compute_listen_port 8080; "], "volumeMounts": [ { "name":
"service-configmap", "mountPath": "/srv/configmap" }, { "name": "pod-main-config",
"mountPath": "/srv/pod-main-config" } ] } ]'}
labels: {service: nova, type: api}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 8774, name: nova-api}
- {containerPort: 8775, name: nova-metadata}
readinessProbe:
httpGet: {path: /healthcheck, port: 8774}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-api:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /lib/modules, name: lib-modules}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap: {name: nova-api}
name: service-configmap
- configMap: {name: nova-api-haproxy}
name: haproxy-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
- hostPath: {path: /lib/modules}
name: lib-modules
deployment "nova-api" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: nova-conductor, namespace: kolla}
spec:
replicas: 1
serviceName: nova
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-nova-conductor",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "sh", "-ce", "cp -a /config/..data/* /nova/; "], "volumeMounts": [ { "name":
"nova-conductor-config", "mountPath": "/config/" }, { "name": "nova-config",
"mountPath": "/nova/" } ] } ]', pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: nova, type: conductor}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-conductor:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-config}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /nova, name: nova-config}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: nova-conductor}
name: nova-conductor-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- emptyDir: {}
name: nova-config
- emptyDir: {}
name: kolla-logs
petset "nova-conductor" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: nova-scheduler, namespace: kolla}
spec:
replicas: 1
serviceName: nova
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-nova-scheduler",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "sh", "-c", "cp -a /config/..data/* /nova/; "], "volumeMounts": [ { "name":
"nova-scheduler-config", "mountPath": "/config/" }, { "name": "nova-config",
"mountPath": "/nova/" } ] } ]', pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: nova, type: scheduler}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-scheduler:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-config}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /nova, name: nova-config}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: nova-scheduler}
name: nova-scheduler-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- emptyDir: {}
name: nova-config
- emptyDir: {}
name: kolla-logs
petset "nova-scheduler" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: glance-api, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
cp -a /srv/ceph.conf /srv/pod-main-config/; cp -a /srv/ceph.client.glance.keyring
/srv/pod-main-config/; CONF=/srv/pod-main-config/glance-api.conf; crudini
--set $CONF DEFAULT bind_host 127.0.0.1; crudini --set $CONF DEFAULT bind_port
8080;" ], "volumeMounts": [ { "name": "ceph-conf", "mountPath": "/srv/ceph.conf",
"subPath": "ceph.conf" }, { "name": "ceph-client-glance-keyring", "mountPath":
"/srv/ceph.client.glance.keyring", "subPath": "ceph.client.glance.keyring"
}, { "name": "service-configmap", "mountPath": "/srv/configmap" }, { "name":
"pod-main-config", "mountPath": "/srv/pod-main-config" } ] } ]'}
labels: {service: glance, type: api}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 9292, name: glance-api}
readinessProbe:
httpGet: {path: /healthcheck, port: 9292}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-glance-api:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
hostPID: true
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap: {name: glance-api}
name: service-configmap
- configMap: {name: glance-api-haproxy}
name: haproxy-config
- configMap: {name: ceph-conf}
name: ceph-conf
- name: ceph-client-glance-keyring
secret: {secretName: ceph-client-glance-keyring}
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "glance-api" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: glance-registry, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
CONF=/srv/pod-main-config/glance-registry.conf; crudini --set $CONF DEFAULT
bind_host 127.0.0.1; crudini --set $CONF DEFAULT bind_port 8080; "], "volumeMounts":
[ { "name": "service-configmap", "mountPath": "/srv/configmap" }, { "name":
"pod-main-config", "mountPath": "/srv/pod-main-config" } ] } ]'}
labels: {service: glance, type: registry}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 9191, name: glance-registry}
readinessProbe:
httpGet: {path: /healthcheck, port: 9191}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-glance-registry:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap: {name: glance-registry}
name: service-configmap
- configMap: {name: glance-registry-haproxy}
name: haproxy-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "glance-registry" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: neutron-server, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
CONF=/srv/pod-main-config/neutron.conf; crudini --set $CONF DEFAULT bind_host
127.0.0.1; crudini --set $CONF DEFAULT bind_port 8080; "], "volumeMounts":
[ { "name": "service-configmap", "mountPath": "/srv/configmap" }, { "name":
"pod-main-config", "mountPath": "/srv/pod-main-config" } ] } ]'}
labels: {service: neutron, type: server}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 9696, name: neutron-server}
readinessProbe:
httpGet: {path: /, port: 9696}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-server:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
- {key: neutron-lbaas.conf, path: neutron_lbaas.conf}
- {key: neutron-vpnaas.conf, path: neutron_vpnaas.conf}
name: neutron-server
name: service-configmap
- configMap: {name: neutron-server-haproxy}
name: haproxy-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "neutron-server" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: horizon, namespace: kolla}
spec:
replicas: 1
strategy: null
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
sed -i ''s|^OPENSTACK_HOST.*=.*|OPENSTACK_HOST=\"192.168.42.160\"|g'' /srv/pod-main-config/local_settings;
sed -i ''s|^OPENSTACK_KEYSTONE_URL.*=.*|OPENSTACK_KEYSTONE_URL=\"http://keystone-admin:35357/v3\"|g''
/srv/pod-main-config/local_settings; "], "volumeMounts": [ { "name": "horizon-configmap",
"mountPath": "/srv/configmap" }, { "name": "pod-main-config", "mountPath":
"/srv/pod-main-config" } ] } ]'}
labels: {service: horizon, type: api}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-horizon:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap:
items:
- {key: horizon.conf, path: horizon.conf}
- {key: config.json, path: config.json}
- {key: local-settings, path: local_settings}
name: horizon
name: horizon-configmap
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "horizon" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: nova-consoleauth, namespace: kolla}
spec:
replicas: 1
serviceName: nova
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-nova-consoleauth",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "sh", "-ce", "cp -a /config/..data/* /nova/"], "volumeMounts": [ { "name":
"nova-consoleauth-config", "mountPath": "/config/" }, { "name": "nova-config",
"mountPath": "/nova/" } ] } ]', pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: nova, type: consoleauth}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-consoleauth:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-config}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /var/log/kolla, name: kolla-logs}
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: nova-consoleauth}
name: nova-consoleauth-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: nova-config
- emptyDir: {}
name: kolla-logs
petset "nova-consoleauth" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: nova-novncproxy, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
CONF=/srv/pod-main-config/nova.conf; crudini --set $CONF vnc novncproxy_host
127.0.0.1; crudini --set $CONF vnc novncproxy_port 8080; "], "volumeMounts":
[ { "name": "service-configmap", "mountPath": "/srv/configmap" }, { "name":
"pod-main-config", "mountPath": "/srv/pod-main-config" } ] } ]'}
labels: {service: nova, type: novncproxy}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 6080, name: nova-novncproxy}
readinessProbe:
httpGet: {path: /vnc_auto.html, port: 6080}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-novncproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap: {name: nova-novncproxy}
name: service-configmap
- configMap: {name: nova-novncproxy-haproxy}
name: haproxy-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "nova-novncproxy" created
apiVersion: extensions/v1beta1
kind: Deployment
metadata: {name: cinder-api, namespace: kolla}
spec:
replicas: 1
strategy:
rollingUpdate: {maxSurge: 1, maxUnavailable: 1}
type: RollingUpdate
template:
metadata:
annotations: {kolla_upgrade: minor_rolling_safe, pod.alpha.kubernetes.io/init-containers: '[
{ "name": "update-config", "image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime",
"command": [ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
CONF=/srv/pod-main-config/cinder.conf; crudini --set $CONF DEFAULT osapi_volume_listen
127.0.0.1; crudini --set $CONF DEFAULT osapi_volume_listen_port 8080; "],
"volumeMounts": [ { "name": "service-configmap", "mountPath": "/srv/configmap"
}, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config" } ]
} ]'}
labels: {service: cinder, type: api}
spec:
containers:
- command: [/bin/bash, -c, 'kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
']
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-haproxy:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
']
name: haproxy
ports:
- {containerPort: 8776, name: cinder-api}
readinessProbe:
httpGet: {path: /, port: 8776}
initialDelaySeconds: 5
timeoutSeconds: 5
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: haproxy-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-cinder-api:2.0.2
lifecycle:
preStop:
exec:
command: [/bin/bash, -c, 'while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown
] && break; done']
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
terminationGracePeriodSeconds: 172800
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- emptyDir: {}
name: pod-main-config
- configMap: {name: cinder-api}
name: service-configmap
- configMap: {name: cinder-api-haproxy}
name: haproxy-config
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
deployment "cinder-api" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata: {name: cinder-scheduler, namespace: kolla}
spec:
replicas: 1
serviceName: cinder
template:
metadata:
annotations: {pod.alpha.kubernetes.io/initialized: 'true'}
labels: {service: cinder, type: scheduler}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-cinder-scheduler:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla-kubernetes/event, name: kolla-kubernetes-events}
- {mountPath: /var/lib/kolla/config_files, name: cinder-scheduler-configmap}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/localtime, name: etc-localtime}
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: kolla-kubernetes-events
- configMap: {name: cinder-scheduler}
name: cinder-scheduler-configmap
- hostPath: {path: /etc/localtime}
name: etc-localtime
- emptyDir: {}
name: kolla-logs
petset "cinder-scheduler" created
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
labels: {component: cinder-volume-ceph, system: cinder-volume-ceph}
name: cinder-volume-ceph
namespace: kolla
spec:
replicas: 1
serviceName: cinder
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "bash", "-ec", "cp -a /srv/configmap/..data/* /srv/pod-main-config/; cp
-a /srv/ceph.conf /srv/pod-main-config/; cp -a /srv/ceph.client.cinder.keyring
/srv/pod-main-config/; "], "volumeMounts": [ { "name": "ceph-conf", "mountPath":
"/srv/ceph.conf", "subPath": "ceph.conf" }, { "name": "ceph-client-cinder-keyring",
"mountPath": "/srv/ceph.client.cinder.keyring", "subPath": "ceph.client.cinder.keyring"
}, { "name": "cinder-volume-configmap", "mountPath": "/srv/configmap" },
{ "name": "pod-main-config", "mountPath": "/srv/pod-main-config" } ] },
{ "name": "initialize-cinder-logs", "image": "kolla/centos-binary-cinder-volume:2.0.2",
"command": [ "sh", "-ce", "mkdir /var/log/kolla/cinder ; chown -R cinder:
/var/log/kolla/cinder "], "volumeMounts": [ { "name": "kolla-logs", "mountPath":
"/var/log/kolla" } ] } ]', pod.alpha.kubernetes.io/initialized: 'true'}
labels: {component: cinder-volume-ceph, system: cinder-volume-ceph}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-cinder-volume:2.0.2
name: main
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config, readOnly: true}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /var/log/kolla/, name: kolla-logs}
hostPID: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap: {name: cinder-volume}
name: cinder-volume-configmap
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- emptyDir: {}
name: kolla-logs
- configMap: {name: ceph-conf}
name: ceph-conf
- name: ceph-client-cinder-keyring
secret: {secretName: ceph-client-cinder-keyring}
petset "cinder-volume-ceph" created
(.venv)watch kubectl get pods --namespace=kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in openvswitch-ovsdb-network openvswitch-vswitchd-network \
> neutron-openvswitch-agent-network neutron-dhcp-agent \
> neutron-metadata-agent-network neutron-l3-agent-network; \
> do
> kolla-kubernetes resource create pod $x
> done
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: openvswitch, system: openvswitch-db-network}
name: openvswitch-ovsdb-network
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-ovs-db",
"image": "kolla/centos-binary-openvswitch-db-server:2.0.2", "command": [
"sh", "-c", "mkdir -p /var/log/kolla/openvswitch; DB=/etc/openvswitch/conf.db;
/usr/bin/ovsdb-tool create $DB; echo $(date) >> /var/log/kolla/db-create.log;"
], "volumeMounts": [ { "name": "openvswitch-db", "mountPath": "/etc/openvswitch/"
}, { "name": "kolla-logs", "mountPath": "/var/log/kolla/" } ] } ]'}
labels: {component: openvswitch, system: openvswitch-db-network}
spec:
containers:
- args: ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err
-vfile:info --remote=punix:/var/run/openvswitch/db.sock --remote=ptcp:6640:0.0.0.0
--log-file=/var/log/kolla/openvswitch/ovsdb-server.log']
command: [sh, -c]
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-openvswitch-db-server:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: openvswitch-db-config, readOnly: true}
- {mountPath: /etc/openvswitch, name: openvswitch-db}
- {mountPath: /var/run/openvswitch, name: openvswitch-run}
- {mountPath: /dev, name: host-dev}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
hostIPC: true
hostNetwork: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: openvswitch-db-server}
name: openvswitch-db-config
- emptyDir: {}
name: openvswitch-db
- hostPath: {path: /var/run/openvswitch}
name: openvswitch-run
- hostPath: {path: /dev}
name: host-dev
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- emptyDir: {}
name: kolla-logs
daemonset "openvswitch-ovsdb-network" created
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: openvswitch, system: openvswitch-vswitchd-network}
name: openvswitch-vswitchd-network
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-ovs-vswitchd",
"image": "kolla/centos-binary-openvswitch-vswitchd:2.0.2", "command": [
"sh", "-c", "mkdir -p /var/log/kolla/openvswitch; ovs-vsctl --no-wait --db=unix:/var/run/openvswitch/db.sock
add-br br-ex ; echo $(date) >> /var/log/kolla/vswitchd-init.log;" ], "volumeMounts":
[ { "name": "kolla-logs", "mountPath": "/var/log/kolla/" }, { "name": "openvswitch-run",
"mountPath": "/var/run/openvswitch/" } ] } ]'}
labels: {component: openvswitch, system: openvswitch-vswitchd-network}
spec:
containers:
- args: ['modprobe openvswitch; /usr/sbin/ovs-vswitchd unix:/var/run/openvswitch/db.sock
-vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log']
command: [sh, -c]
env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-openvswitch-vswitchd:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: openvswitch-vswitchd-config,
readOnly: true}
- {mountPath: /var/run/openvswitch, name: openvswitch-run}
- {mountPath: /dev, name: host-dev}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /lib/modules, name: host-lib-modules, readOnly: true}
- {mountPath: /var/log/kolla, name: kolla-logs}
hostIPC: true
hostNetwork: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap: {name: openvswitch-vswitchd}
name: openvswitch-vswitchd-config
- hostPath: {path: /var/run/openvswitch}
name: openvswitch-run
- hostPath: {path: /dev}
name: host-dev
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- hostPath: {path: /lib/modules}
name: host-lib-modules
- emptyDir: {}
name: kolla-logs
daemonset "openvswitch-vswitchd-network" created
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: neutron, system: openvswitch-agent-network}
name: neutron-openvswitch-agent-network
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;
TUNNEL_INTERFACE=eth0; F=/var/lib/kolla-kubernetes/neutron-openvswitch-agent/tunnel_interface;
[ -f $F ] && TUNNEL_INTERFACE=$(<$F); IP=$(ip addr list eth0 | grep ''inet
'' |cut -d'' '' -f6|cut -d/ -f1);crudini --set /srv/pod-main-config/ml2_conf.ini
ovs local_ip $IP" ], "volumeMounts": [ { "name": "neutron-openvswitch-agent-configmap",
"mountPath": "/srv/configmap" }, { "name": "pod-main-config", "mountPath":
"/srv/pod-main-config" } ] }, { "name": "update-permissions", "image": "kolla/centos-binary-neutron-openvswitch-agent:2.0.2",
"securityContext": { "runAsUser": 0 }, "command": [ "/bin/sh", "-c", "chown
--recursive neutron.kolla /var/log/kolla;" ], "volumeMounts": [ { "name":
"kolla-logs", "mountPath": "/var/log/kolla" } ] } ]'}
labels: {component: neutron, system: openvswitch-agent-network}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-openvswitch-agent:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config, readOnly: true}
- {mountPath: /var/lib/kolla-kubernetes/neutron-openvswitch-agent, name: openvswitch-config,
readOnly: true}
- {mountPath: /var/run, name: host-run}
- {mountPath: /dev, name: host-dev}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /lib/modules, name: host-lib-modules, readOnly: true}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/resolv.conf, name: resolv-conf, subPath: resolv.conf}
hostIPC: true
hostNetwork: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
name: neutron-openvswitch-agent
name: neutron-openvswitch-agent-configmap
- emptyDir: {}
name: pod-main-config
- hostPath: {path: /var/lib/kolla-kubernetes/neutron-openvswitch-agent}
name: openvswitch-config
- hostPath: {path: /var/run}
name: host-run
- hostPath: {path: /dev}
name: host-dev
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- hostPath: {path: /lib/modules}
name: host-lib-modules
- emptyDir: {}
name: kolla-logs
- configMap: {name: resolv-conf}
name: resolv-conf
daemonset "neutron-openvswitch-agent-network" created
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: neutron, system: dhcp-agent}
name: neutron-dhcp-agent
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;crudini
--set /srv/pod-main-config/ml2_conf.ini ovs local_ip $IP; "], "volumeMounts":
[ { "name": "neutron-dhcp-agent-configmap", "mountPath": "/srv/configmap"
}, { "name": "pod-main-config", "mountPath": "/srv/pod-main-config" } ]
}, { "name": "update-permissions", "image": "kolla/centos-binary-neutron-dhcp-agent:2.0.2",
"securityContext": { "runAsUser": 0 }, "command": [ "/bin/sh", "-c", "chown
--recursive neutron.kolla /var/log/kolla;" ], "volumeMounts": [ { "name":
"kolla-logs", "mountPath": "/var/log/kolla" } ] } ]'}
labels: {component: neutron, system: dhcp-agent}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-dhcp-agent:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config, readOnly: true}
- {mountPath: /var/lib/neutron/kolla/, name: neutron-metadata-socket}
- {mountPath: /var/run, name: host-run}
- {mountPath: /run/netns/, name: host-run-netns}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/resolv.conf, name: resolv-conf, subPath: resolv.conf}
hostIPC: true
hostNetwork: true
hostPID: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
- {key: dhcp-agent.ini, path: dhcp_agent.ini}
- {key: dnsmasq.conf, path: dnsmasq.conf}
name: neutron-dhcp-agent
name: neutron-dhcp-agent-configmap
- hostPath: {path: /run/netns}
name: host-run-netns
- hostPath: {path: /var/run}
name: host-run
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- hostPath: {path: /var/lib/neutron/kolla/metadata_proxy}
name: neutron-metadata-socket
- emptyDir: {}
name: kolla-logs
- configMap: {name: resolv-conf}
name: resolv-conf
daemonset "neutron-dhcp-agent" created
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: neutron, system: metadata-agent-network}
name: neutron-metadata-agent-network
namespace: kolla
spec:
template:
metadata:
labels: {component: neutron, system: metadata-agent-network}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-metadata-agent:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: neutron-metadata-agent-config,
readOnly: true}
- {mountPath: /var/lib/neutron/kolla/, name: neutron-metadata-socket}
- {mountPath: /run/netns/, name: host-run-netns}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/resolv.conf, name: resolv-conf, subPath: resolv.conf}
hostNetwork: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
- {key: metadata-agent.ini, path: metadata_agent.ini}
name: neutron-metadata-agent
name: neutron-metadata-agent-config
- hostPath: {path: /run/netns}
name: host-run-netns
- hostPath: {path: /run}
name: host-run
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- hostPath: {path: /var/lib/neutron/kolla/metadata_proxy}
name: neutron-metadata-socket
- emptyDir: {}
name: kolla-logs
- configMap: {name: resolv-conf}
name: resolv-conf
daemonset "neutron-metadata-agent-network" created
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: neutron, system: l3-agent-network}
name: neutron-l3-agent-network
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "update-config",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "/bin/sh", "-c", "cp -a /srv/configmap/..data/* /srv/pod-main-config/;crudini
--set /srv/pod-main-config/ml2_conf.ini ovs local_ip $IP;crudini --set /srv/pod-main-config/l3_agent.ini
DEFAULT handle_internal_only_routers true;crudini --set /srv/pod-main-config/l3_agent.ini
DEFAULT agent_mode legacy;" ], "volumeMounts": [ { "name": "neutron-l3-agent-configmap",
"mountPath": "/srv/configmap" }, { "name": "pod-main-config", "mountPath":
"/srv/pod-main-config" } ] }, { "name": "update-permissions", "image": "kolla/centos-binary-neutron-l3-agent:2.0.2",
"securityContext": { "runAsUser": 0 }, "command": [ "/bin/sh", "-c", "chown
--recursive neutron.kolla /var/log/kolla; echo FIXME check for ip6tables
loaded;" ], "volumeMounts": [ { "name": "kolla-logs", "mountPath": "/var/log/kolla"
} ] } ]'}
labels: {component: neutron, system: l3-agent-network}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/centos-binary-neutron-l3-agent:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: pod-main-config}
- {mountPath: /var/lib/neutron/kolla/, name: neutron-metadata-socket}
- {mountPath: /var/run, name: host-run}
- {mountPath: /run/netns/, name: host-run-netns}
- {mountPath: /etc/localtime, name: host-etc-localtime, readOnly: true}
- {mountPath: /var/log/kolla/, name: kolla-logs}
- {mountPath: /etc/resolv.conf, name: resolv-conf, subPath: resolv.conf}
hostIPC: true
hostNetwork: true
hostPID: true
nodeSelector: {kolla_controller: 'true'}
volumes:
- emptyDir: {}
name: pod-main-config
- configMap:
items:
- {key: neutron.conf, path: neutron.conf}
- {key: config.json, path: config.json}
- {key: ml2-conf.ini, path: ml2_conf.ini}
- {key: fwaas-driver.ini, path: fwaas_driver.ini}
- {key: l3-agent.ini, path: l3_agent.ini}
name: neutron-l3-agent
name: neutron-l3-agent-configmap
- hostPath: {path: /run/netns}
name: host-run-netns
- hostPath: {path: /var/run}
name: host-run
- hostPath: {path: /etc/localtime}
name: host-etc-localtime
- hostPath: {path: /var/lib/neutron/kolla/metadata_proxy}
name: neutron-metadata-socket
- emptyDir: {}
name: kolla-logs
- configMap: {name: resolv-conf}
name: resolv-conf
daemonset "neutron-l3-agent-network" created
(.venv)
(.venv)kolla-kubernetes resource create bootstrap openvswitch-set-external-ip
apiVersion: batch/v1
kind: Job
metadata: {name: openvswitch-set-external-ip, namespace: kolla}
spec:
template:
spec:
containers:
- command: [/bin/bash, -ec, 'ip addr add 172.18.0.1/24 dev br-ex;
ip link set br-ex up;
']
image: kolla/centos-binary-openvswitch-vswitchd:2.0.2
name: main
securityContext: {privileged: true}
hostNetwork: true
nodeSelector: {kolla_controller: 'true'}
restartPolicy: OnFailure
job "openvswitch-set-external-ip" created
(.venv)kolla-kubernetes resource create pod nova-libvirt
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: nova, system: nova-libvirt}
name: nova-libvirt
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-nova-libvirt",
"image": "kolla/ubuntu-binary-nova-libvirt:2.0.2", "command": [ "sh", "-c",
"cp -a /config/..data/* /nova/; mkdir -p /nova/secrets/; cp -a /secret/a/..data/data
/nova/secrets/f68fec1b-1831-4fd8-99d1-5487c4609c5b.xml; cp -a /secret/b/..data/data
/nova/secrets/f68fec1b-1831-4fd8-99d1-5487c4609c5b.base64; cp -a /config2/..data/ceph.conf
/nova; "], "volumeMounts": [ { "name": "ceph-conf", "mountPath": "/config2"
}, { "name": "nova-libvirt-secret", "mountPath": "/secret/a/" }, { "name":
"nova-libvirt-bin-secret", "mountPath": "/secret/b/" }, { "name": "nova-libvirt-config",
"mountPath": "/config/" }, { "name": "nova-config", "mountPath": "/nova/"
} ] } ]'}
labels: {component: nova, system: nova-libvirt}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-libvirt:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-config}
- {mountPath: /var/lib/libvirt, name: nova-libvirt-persistent-storage}
- {mountPath: /var/lib/nova, name: nova-compute-persistent-storage}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /lib/modules, name: lib-modules}
- {mountPath: /var/run, name: run}
- {mountPath: /dev, name: dev}
- {mountPath: /sys/fs/cgroup, name: sys-fs-cgroup}
- {mountPath: /nova, name: nova-config}
- {mountPath: /var/log/kolla, name: kolla-logs}
hostIPC: true
hostNetwork: true
hostPID: true
nodeSelector: {kolla_compute: 'true'}
volumes:
- hostPath: {path: /var/lib/nova}
name: nova-compute-persistent-storage
- configMap: {name: nova-libvirt}
name: nova-libvirt-config
- hostPath: {path: /var/lib/libvirt}
name: nova-libvirt-persistent-storage
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- hostPath: {path: /var/run}
name: run
- hostPath: {path: /dev}
name: dev
- hostPath: {path: /sys/fs/cgroup}
name: sys-fs-cgroup
- emptyDir: {}
name: nova-config
- emptyDir: {}
name: kolla-logs
- configMap: {name: ceph-conf}
name: ceph-conf
- name: nova-libvirt-secret
secret: {secretName: nova-libvirt}
- name: nova-libvirt-bin-secret
secret: {secretName: nova-libvirt-bin}
daemonset "nova-libvirt" created
(.venv)kolla-kubernetes resource create pod nova-compute
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels: {component: nova, system: nova-compute}
name: nova-compute
namespace: kolla
spec:
template:
metadata:
annotations: {pod.alpha.kubernetes.io/init-containers: '[ { "name": "initialize-nova-compute",
"image": "kfox1111/centos-binary-kolla-toolbox:trunk-sometime", "command":
[ "sh", "-ec", "cp -a /config/..data/* /nova/; IP=$(ip addr show dev eth0
| grep -G ''inet '' | awk ''{print $2}'' | sed ''s@/.*@@''); mkdir -p /var/log/kolla/nova-init;
echo Using $IP | tee /var/log/kolla/nova-init/ip.log; crudini --set /nova/nova.conf
DEFAULT my_ip $IP; crudini --set /nova/nova.conf vnc novncproxy_host $IP;
crudini --set /nova/nova.conf vnc vncserver_listen $IP; crudini --set /nova/nova.conf
vnc vncserver_proxyclient_address $IP; crudini --set /nova/nova.conf vnc
novncproxy_base_url http://192.168.42.160:6080/vnc_auto.html; cp -a /secret/a/..data/ceph.client.nova.keyring
/nova/ceph.client.nova.keyring; cp -a /config2/..data/ceph.conf /nova/ceph.conf;
"], "volumeMounts": [ { "name": "ceph-conf", "mountPath": "/config2/" },
{ "name": "ceph-client-nova-keyring", "mountPath": "/secret/a/" }, { "name":
"kolla-logs", "mountPath": "/var/log/kolla/" }, { "name": "nova-compute-config",
"mountPath": "/config/" }, { "name": "nova-config", "mountPath": "/nova/"
} ] }, { "name": "initialize-nova", "image": "kolla/ubuntu-binary-nova-compute:2.0.2",
"command": [ "sh", "-ce", "[ ! -d /var/lib/nova/instances ] && mkdir -p
/var/lib/nova/instances; chown nova: /var/lib/nova; chown nova: /var/lib/nova/instances;
"], "securityContext": { "runAsUser": 0 }, "volumeMounts": [ { "name": "nova-compute-persistent-storage",
"mountPath": "/var/lib/nova" } ] } ]'}
labels: {component: nova, system: nova-compute}
spec:
containers:
- env:
- {name: KOLLA_CONFIG_STRATEGY, value: COPY_ALWAYS}
image: kolla/ubuntu-binary-nova-compute:2.0.2
name: main
securityContext: {privileged: true}
volumeMounts:
- {mountPath: /var/lib/kolla/config_files, name: nova-config}
- {mountPath: /var/lib/nova, name: nova-compute-persistent-storage}
- {mountPath: /etc/localtime, name: etc-localtime}
- {mountPath: /lib/modules, name: lib-modules}
- {mountPath: /var/run, name: run}
- {mountPath: /dev, name: dev}
- {mountPath: /sys/fs/cgroup, name: sys-fs-cgroup}
- {mountPath: /nova, name: nova-config}
- {mountPath: /var/log/kolla, name: kolla-logs}
- {mountPath: /etc/resolv.conf, name: resolv-conf, subPath: resolv.conf}
hostIPC: true
hostNetwork: true
nodeSelector: {kolla_compute: 'true'}
volumes:
- configMap: {name: nova-compute}
name: nova-compute-config
- hostPath: {path: /var/lib/nova}
name: nova-compute-persistent-storage
- hostPath: {path: /etc/localtime}
name: etc-localtime
- hostPath: {path: /lib/modules}
name: lib-modules
- hostPath: {path: /var/run}
name: run
- hostPath: {path: /dev}
name: dev
- hostPath: {path: /sys/fs/cgroup}
name: sys-fs-cgroup
- emptyDir: {}
name: nova-config
- emptyDir: {}
name: kolla-logs
- configMap: {name: ceph-conf}
name: ceph-conf
- name: ceph-client-nova-keyring
secret: {secretName: ceph-client-nova-keyring}
- configMap: {name: resolv-conf}
name: resolv-conf
daemonset "nova-compute" created
(.venv)
(.venv)watch kubectl get jobs --namespace=kolla
(.venv)
(.venv)
(.venv)
(.venv)
(.venv)for x in mariadb keystone horizon rabbitmq memcached nova-api \
> nova-conductor nova-scheduler glance-api-haproxy \
> glance-registry-haproxy glance-api glance-registry \
> neutron-server neutron-dhcp-agent neutron-l3-agent \
> neutron-metadata-agent neutron-openvswitch-agent \
> openvswitch-db-server openvswitch-vswitchd nova-libvirt \
> nova-compute nova-consoleauth nova-novncproxy \
> nova-novncproxy-haproxy neutron-server-haproxy \
> nova-api-haproxy cinder-api cinder-api-haproxy \
> cinder-backup cinder-scheduler cinder-volume \
> ceph-mon ceph-osd; \
> do
> kolla-kubernetes resource create configmap $x
> done
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/bin/mysqld_safe\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/galera.cnf\"\
,\n \"dest\": \"/etc//my.cnf\",\n \"owner\": \"mysql\",\n\
\ \"perm\": \"0600\"\n },\n {\n \"source\"\
: \"/var/lib/kolla/config_files/wsrep-notify.sh\",\n \"dest\": \"/usr/local/bin/wsrep-notify.sh\"\
,\n \"owner\": \"mysql\",\n \"perm\": \"0700\"\n \
\ }\n ]\n}\n", galera.cnf: '[client]
default-character-set = utf8
[mysql]
default-character-set = utf8
[mysqld]
bind-address = 0.0.0.0
port = 3306
log-error = /var/log/kolla/mariadb/mariadb.log
binlog_format = ROW
default-storage-engine = innodb
innodb_autoinc_lock_mode = 2
collation-server = utf8_unicode_ci
init-connect = ''SET NAMES utf8''
character-set-server = utf8
datadir = /var/lib/mysql/
wsrep_cluster_address = gcomm://
wsrep_provider_options = gmcast.listen_addr=tcp://0.0.0.0:4567;ist.recv_addr=0.0.0.0:4568
wsrep_node_address = 0.0.0.0:4567
wsrep_sst_receive_address = 0.0.0.0:4444
wsrep_provider = none
wsrep_cluster_name = "openstack"
wsrep_node_name = toolbox
wsrep_sst_method = xtrabackup-v2
wsrep_sst_auth = root:K5zyKDRLG8HvT65aZl1VKsLC9VX6Gv88sIJjMYYz
wsrep_slave_threads = 4
wsrep_notify_cmd = /usr/local/bin/wsrep-notify.sh
max_connections = 10000
[server]
pid-file = /var/lib/mysql/mariadb.pid
', wsrep-notify.sh: "#!/bin/bash -e\n\n# Edit parameters below to specify the\
\ address and login to server.\nUSER=root\nPSWD=K5zyKDRLG8HvT65aZl1VKsLC9VX6Gv88sIJjMYYz\n\
HOST=0.0.0.0\nPORT=3306\nLB_USER=haproxy\n\nENABLE_LB=\"UPDATE mysql.user SET\
\ User='${LB_USER}' WHERE User='${LB_USER}_blocked';\"\nDISABLE_LB=\"UPDATE mysql.user\
\ SET User='${LB_USER}_blocked' WHERE User='${LB_USER}';\"\nMYSQL_CMD=\"`type\
\ -p mysql` -B -u$USER -p$PSWD -h$HOST -P$PORT\"\n\nstatus_update()\n{\n echo\
\ \"SET SESSION wsrep_on=off;\"\n echo \"$@\"\n echo \"FLUSH PRIVILEGES;\"\
\n}\n\nget_sst_method()\n{\n $MYSQL_CMD -s -N -e \"SHOW VARIABLES LIKE 'wsrep_sst_method';\"\
\ | awk '{ print $2 }'\n}\n\nwhile [ $# -gt 0 ]\ndo\n case $1 in\n --status)\n\
\ STATUS=$2\n shift\n ;;\n --uuid)\n CLUSTER_UUID=$2\n\
\ shift\n ;;\n --primary)\n [ \"$2\" = \"yes\" ] && PRIMARY=\"\
1\" || PRIMARY=\"0\"\n shift\n ;;\n --index)\n INDEX=$2\n\
\ shift\n ;;\n --members)\n MEMBERS=$2\n shift\n\
\ ;;\n esac\n shift\ndone\n\ncase $STATUS in\nSynced)\n CMD=$ENABLE_LB\n\
\ ;;\nDonor)\n # enabling donor only if xtrabackup configured\n SST_METHOD=`get_sst_method`\n\
\ [[ $SST_METHOD =~ 'xtrabackup' ]] && CMD=$ENABLE_LB || CMD=$DISABLE_LB\n\
\ ;;\nUndefined)\n # shutting down database: do nothing\n ;;\n*)\n \
\ CMD=$DISABLE_LB\n ;;\nesac\n\nif [ -n \"$CMD\" ]\nthen\n status_update\
\ \"$CMD\" | $MYSQL_CMD\nfi\n\nexit 0\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: mariadb, namespace: kolla}
configmap "mariadb" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/httpd\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/keystone.conf\"\
,\n \"dest\": \"/etc/keystone/keystone.conf\",\n \"owner\"\
: \"keystone\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/keystone-paste.ini\",\n \
\ \"dest\": \"/etc/keystone/keystone-paste.ini\",\n \"owner\"\
: \"keystone\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/domains\",\n \"dest\"\
: \"/etc/keystone/domains\",\n \"owner\": \"keystone\",\n \
\ \"perm\": \"0700\",\n \"optional\": true\n },\n {\n\
\ \"source\": \"/var/lib/kolla/config_files/policy.json\",\n \
\ \"dest\": \"/etc/keystone/policy.json\",\n \"owner\": \"keystone\"\
,\n \"perm\": \"0600\",\n \"optional\": true\n },\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/wsgi-keystone.conf\"\
,\n \"dest\": \"/etc/httpd/conf.d/wsgi-keystone.conf\",\n \
\ \"owner\": \"keystone\",\n \"perm\": \"0644\"\n }\n ]\n\
}\n", keystone-paste.ini: '[filter:debug]
use = egg:oslo.middleware#debug
[filter:request_id]
use = egg:oslo.middleware#request_id
[filter:build_auth_context]
use = egg:keystone#build_auth_context
[filter:token_auth]
use = egg:keystone#token_auth
[filter:json_body]
use = egg:keystone#json_body
[filter:cors]
use = egg:oslo.middleware#cors
oslo_config_project = keystone
[filter:ec2_extension]
use = egg:keystone#ec2_extension
[filter:ec2_extension_v3]
use = egg:keystone#ec2_extension_v3
[filter:s3_extension]
use = egg:keystone#s3_extension
[filter:url_normalize]
use = egg:keystone#url_normalize
[filter:sizelimit]
use = egg:oslo.middleware#sizelimit
[app:public_service]
use = egg:keystone#public_service
[app:service_v3]
use = egg:keystone#service_v3
[app:admin_service]
use = egg:keystone#admin_service
[pipeline:public_api]
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth
json_body ec2_extension public_service
[pipeline:admin_api]
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth
json_body ec2_extension s3_extension admin_service
[pipeline:api_v3]
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth
json_body ec2_extension_v3 s3_extension service_v3
[app:public_version_service]
use = egg:keystone#public_version_service
[app:admin_version_service]
use = egg:keystone#admin_version_service
[pipeline:public_version_api]
pipeline = cors sizelimit url_normalize public_version_service
[pipeline:admin_version_api]
pipeline = cors sizelimit url_normalize admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
', keystone.conf: '[DEFAULT]
debug = True
notification_format = cadf
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
log_file = /var/log/kolla/keystone/keystone.log
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
[database]
connection = mysql+pymysql://keystone:AfMit7QzW4CscKPRbaJIRPJV3LEXnz12xb4TPKvn@mariadb/keystone
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached
[oslo_messaging_notifications]
driver = messagingv2
', wsgi-keystone.conf: "Listen 0.0.0.0:5000\nListen 0.0.0.0:35357\n\n<VirtualHost\
\ *:5000>\n WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone\
\ group=keystone display-name=%{GROUP} python-path=/usr/lib/python2.7/site-packages\n\
\ WSGIProcessGroup keystone-public\n WSGIScriptAlias / /var/www/cgi-bin/keystone/main\n\
\ WSGIApplicationGroup %{GLOBAL}\n WSGIPassAuthorization On\n <IfVersion\
\ >= 2.4>\n ErrorLogFormat \"%{cu}t %M\"\n </IfVersion>\n ErrorLog\
\ \"/var/log/kolla/keystone/keystone-apache-public-error.log\"\n LogFormat\
\ \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b %D \\\"%{Referer}i\\\" \\\"\
%{User-Agent}i\\\"\" logformat\n CustomLog \"/var/log/kolla/keystone/keystone-apache-public-access.log\"\
\ logformat\n</VirtualHost>\n\n<VirtualHost *:35357>\n WSGIDaemonProcess keystone-admin\
\ processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} python-path=/usr/lib/python2.7/site-packages\n\
\ WSGIProcessGroup keystone-admin\n WSGIScriptAlias / /var/www/cgi-bin/keystone/admin\n\
\ WSGIApplicationGroup %{GLOBAL}\n WSGIPassAuthorization On\n <IfVersion\
\ >= 2.4>\n ErrorLogFormat \"%{cu}t %M\"\n </IfVersion>\n ErrorLog\
\ \"/var/log/kolla/keystone/keystone-apache-admin-error.log\"\n LogFormat \"\
%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b %D \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\
\"\" logformat\n CustomLog \"/var/log/kolla/keystone/keystone-apache-admin-access.log\"\
\ logformat\n</VirtualHost>\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: keystone, namespace: kolla}
configmap "keystone" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/httpd -DFOREGROUND\",\n \"\
config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/horizon.conf\"\
,\n \"dest\": \"/etc/httpd/conf.d/horizon.conf\",\n \"owner\"\
: \"horizon\",\n \"perm\": \"0644\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/local_settings\",\n \
\ \"dest\": \"/etc/openstack-dashboard/local_settings\",\n \"owner\"\
: \"horizon\",\n \"perm\": \"0644\"\n }\n ]\n}\n", horizon.conf: "Listen\
\ 0.0.0.0:80\n\n<VirtualHost *:80>\n LogLevel warn\n ErrorLog /var/log/kolla/horizon/horizon.log\n\
\ CustomLog /var/log/kolla/horizon/horizon-access.log combined\n\n WSGIScriptReloading\
\ On\n WSGIDaemonProcess horizon-http processes=5 threads=1 user=horizon group=horizon\
\ display-name=%{GROUP} python-path=/usr/lib/python2.7/site-packages\n WSGIProcessGroup\
\ horizon-http\n WSGIScriptAlias / /usr/lib/python2.7/site-packages/openstack_dashboard/wsgi/django.wsgi\n\
\ WSGIPassAuthorization On\n\n <Location \"/\">\n Require all granted\n\
\ </Location>\n\n Alias /static /usr/lib/python2.7/site-packages/static\n\
\ <Location \"/static\">\n SetHandler None\n </Location>\n</VirtualHost>\n\
\n# NOTE(Jeffrey4l): Only enable deflate when tls is disabled until the\n# OSSN-0037\
\ is fixed.\n# see https://wiki.openstack.org/wiki/OSSN/OSSN-0037 for more information.\n\
<IfModule mod_deflate.c>\n # Compress HTML, CSS, JavaScript, Text, XML and\
\ fonts\n AddOutputFilterByType DEFLATE application/javascript\n AddOutputFilterByType\
\ DEFLATE application/rss+xml\n AddOutputFilterByType DEFLATE application/vnd.ms-fontobject\n\
\ AddOutputFilterByType DEFLATE application/x-font\n AddOutputFilterByType\
\ DEFLATE application/x-font-opentype\n AddOutputFilterByType DEFLATE application/x-font-otf\n\
\ AddOutputFilterByType DEFLATE application/x-font-truetype\n AddOutputFilterByType\
\ DEFLATE application/x-font-ttf\n AddOutputFilterByType DEFLATE application/x-javascript\n\
\ AddOutputFilterByType DEFLATE application/xhtml+xml\n AddOutputFilterByType\
\ DEFLATE application/xml\n AddOutputFilterByType DEFLATE font/opentype\n \
\ AddOutputFilterByType DEFLATE font/otf\n AddOutputFilterByType DEFLATE\
\ font/ttf\n AddOutputFilterByType DEFLATE image/svg+xml\n AddOutputFilterByType\
\ DEFLATE image/x-icon\n AddOutputFilterByType DEFLATE text/css\n AddOutputFilterByType\
\ DEFLATE text/html\n AddOutputFilterByType DEFLATE text/javascript\n AddOutputFilterByType\
\ DEFLATE text/plain\n AddOutputFilterByType DEFLATE text/xml\n</IfModule>\n\
\n<IfModule mod_expires.c>\n <Filesmatch \"\\.(jpg|jpeg|png|gif|js|css|swf|ico|woff)$\"\
>\n ExpiresActive on\n ExpiresDefault \"access plus 1 month\"\n\
\ ExpiresByType application/javascript \"access plus 1 year\"\n \
\ ExpiresByType text/css \"access plus 1 year\"\n ExpiresByType image/x-ico\
\ \"access plus 1 year\"\n ExpiresByType image/jpg \"access plus 1 year\"\
\n ExpiresByType image/jpeg \"access plus 1 year\"\n ExpiresByType\
\ image/gif \"access plus 1 year\"\n ExpiresByType image/png \"access plus\
\ 1 year\"\n Header merge Cache-Control public\n Header unset ETag\n\
\ </Filesmatch>\n</IfModule>\n", local-settings: "# -*- coding: utf-8 -*-\n\
\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom\
\ openstack_dashboard import exceptions\nfrom openstack_dashboard.settings import\
\ HORIZON_CONFIG\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nCOMPRESS_OFFLINE\
\ = True\n\n# WEBROOT is the location relative to Webserver root\n# should end\
\ with a slash.\nWEBROOT = '/'\n#LOGIN_URL = WEBROOT + 'auth/login/'\n#LOGOUT_URL\
\ = WEBROOT + 'auth/logout/'\n#\n# LOGIN_REDIRECT_URL can be used as an alternative\
\ for\n# HORIZON_CONFIG.user_home, if user_home is not set.\n# Do not set it to\
\ '/home/', as this will cause circular redirect loop\n#LOGIN_REDIRECT_URL = WEBROOT\n\
\n# If horizon is running in production (DEBUG is False), set this\n# with the\
\ list of host/domain names that the application can serve.\n# For more information\
\ see:\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\n\
ALLOWED_HOSTS = ['*']\n\n\n# Set SSL proxy settings:\n# Pass this header from\
\ the proxy after terminating the SSL,\n# and don't forget to strip it from the\
\ client's request.\n# For more information see:\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header\n\
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# If Horizon\
\ is being served through SSL, then uncomment the following two\n# settings to\
\ better secure the cookies from security exploits\n#CSRF_COOKIE_SECURE = True\n\
#SESSION_COOKIE_SECURE = True\n\n\n# The absolute path to the directory where\
\ message files are collected.\n# The message file must have a .json file extension.\
\ When the user logins to\n# horizon, the message files collected are processed\
\ and displayed to the user.\n#MESSAGES_PATH=None\n\n# Overrides for OpenStack\
\ API versions. Use this setting to force the\n# OpenStack dashboard to use a\
\ specific API version for a given service API.\n# Versions specified here should\
\ be integers or floats, not strings.\n# NOTE: The version should be formatted\
\ as it appears in the URL for the\n# service API. For example, The identity service\
\ APIs have inconsistent\n# use of the decimal point, so valid options would be\
\ 2.0 or 3.\n# Minimum compute version to get the instance locked status is 2.9.\n\
#OPENSTACK_API_VERSIONS = {\n# \"data-processing\": 1.1,\n# \"identity\"\
: 3,\n# \"volume\": 2,\n# \"compute\": 2,\n#}\n\nOPENSTACK_API_VERSIONS\
\ = {\n \"identity\": 3,\n}\n\n# Set this to True if running on a multi-domain\
\ model. When this is enabled, it\n# will require the user to enter the Domain\
\ name in addition to the username\n# for login.\n#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT\
\ = False\n\n# Overrides the default domain used when running on single-domain\
\ model\n# with Keystone V3. All entities will be created in the default domain.\n\
# NOTE: This value must be the ID of the default domain, NOT the name.\n# Also,\
\ you will most likely have a value in the keystone policy file like this\n# \
\ \"cloud_admin\": \"rule:admin_required and domain_id:<your domain id>\"\n\
# This value must match the domain id specified there.\n#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN\
\ = 'default'\n\n# Set this to True to enable panels that provide the ability\
\ for users to\n# manage Identity Providers (IdPs) and establish a set of rules\
\ to map\n# federation protocol attributes to Identity API attributes.\n# This\
\ extension requires v3.0+ of the Identity API.\n#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT\
\ = False\n\n# Set Console type:\n# valid options are \"AUTO\"(default), \"VNC\"\
, \"SPICE\", \"RDP\", \"SERIAL\" or None\n# Set to None explicitly if you want\
\ to deactivate the console.\n#CONSOLE_TYPE = \"AUTO\"\n\n# If provided, a \"\
Report Bug\" link will be displayed in the site header\n# which links to the value\
\ of this setting (ideally a URL containing\n# information on how to report issues).\n\
#HORIZON_CONFIG[\"bug_url\"] = \"http://bug-report.example.com\"\n\n# Show backdrop\
\ element outside the modal, do not close the modal\n# after clicking on backdrop.\n\
#HORIZON_CONFIG[\"modal_backdrop\"] = \"static\"\n\n# Specify a regular expression\
\ to validate user passwords.\n#HORIZON_CONFIG[\"password_validator\"] = {\n#\
\ \"regex\": '.*',\n# \"help_text\": _(\"Your password does not meet the\
\ requirements.\"),\n#}\n\n# Disable simplified floating IP address management\
\ for deployments with\n# multiple floating IP pools or complex network requirements.\n\
#HORIZON_CONFIG[\"simple_ip_management\"] = False\n\n# Turn off browser autocompletion\
\ for forms including the login form and\n# the database creation workflow if\
\ so desired.\n#HORIZON_CONFIG[\"password_autocomplete\"] = \"off\"\n\n# Setting\
\ this to True will disable the reveal button for password fields,\n# including\
\ on the login form.\n#HORIZON_CONFIG[\"disable_password_reveal\"] = False\n\n\
LOCAL_PATH = '/tmp'\n\n# Set custom secret key:\n# You can either set it to a\
\ specific value or you can let horizon generate a\n# default secret key that\
\ is unique on this machine, e.i. regardless of the\n# amount of Python WSGI workers\
\ (if used behind Apache+mod_wsgi): However,\n# there may be situations where\
\ you would want to set this explicitly, e.g.\n# when multiple dashboard instances\
\ are distributed on different machines\n# (usually behind a load-balancer). Either\
\ you have to make sure that a session\n# gets all requests routed to the same\
\ dashboard instance or you set the same\n# SECRET_KEY for all of them.\nSECRET_KEY='d5iGA5ZlCCQV9XAaI5Tc1FnUDKj8Zyd0b4CpM3IQ'\n\
\n# We recommend you use memcached for development; otherwise after every reload\n\
# of the django development server, you will have to login again. To use\n# memcached\
\ set CACHES to something like\n#CACHES = {\n# 'default': {\n# 'BACKEND':\
\ 'django.core.cache.backends.memcached.MemcachedCache',\n# 'LOCATION':\
\ '127.0.0.1:11211',\n# },\n#}\n\nCACHES = {\n 'default': {\n 'BACKEND':\
\ 'django.core.cache.backends.memcached.MemcachedCache',\n\n 'LOCATION':\
\ 'memcached'\n }\n}\n\n# Send email to the console by default\nEMAIL_BACKEND\
\ = 'django.core.mail.backends.console.EmailBackend'\n# Or send them to /dev/null\n\
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\n# Configure\
\ these for your outgoing email host\n#EMAIL_HOST = 'smtp.my-company.com'\n#EMAIL_PORT\
\ = 25\n#EMAIL_HOST_USER = 'djangomail'\n#EMAIL_HOST_PASSWORD = 'top-secret!'\n\
\n# For multiple regions uncomment this configuration, and add (endpoint, title).\n\
#AVAILABLE_REGIONS = [\n# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),\n\
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),\n#]\n\nOPENSTACK_HOST\
\ = \"0.0.0.0\"\n\nOPENSTACK_KEYSTONE_URL = \"http://keystone-public:5000/v3\"\
\nOPENSTACK_KEYSTONE_DEFAULT_ROLE = \"_member_\"\n\n# Enables keystone web single-sign-on\
\ if set to True.\n#WEBSSO_ENABLED = False\n\n# Determines which authentication\
\ choice to show as default.\n#WEBSSO_INITIAL_CHOICE = \"credentials\"\n\n# The\
\ list of authentication mechanisms which include keystone\n# federation protocols\
\ and identity provider/federation protocol\n# mapping keys (WEBSSO_IDP_MAPPING).\
\ Current supported protocol\n# IDs are 'saml2' and 'oidc' which represent SAML\
\ 2.0, OpenID\n# Connect respectively.\n# Do not remove the mandatory credentials\
\ mechanism.\n# Note: The last two tuples are sample mapping keys to a identity\
\ provider\n# and federation protocol combination (WEBSSO_IDP_MAPPING).\n#WEBSSO_CHOICES\
\ = (\n# (\"credentials\", _(\"Keystone Credentials\")),\n# (\"oidc\", _(\"\
OpenID Connect\")),\n# (\"saml2\", _(\"Security Assertion Markup Language\"\
)),\n# (\"acme_oidc\", \"ACME - OpenID Connect\"),\n# (\"acme_saml2\", \"\
ACME - SAML2\"),\n#)\n\n# A dictionary of specific identity provider and federation\
\ protocol\n# combinations. From the selected authentication mechanism, the value\n\
# will be looked up as keys in the dictionary. If a match is found,\n# it will\
\ redirect the user to a identity provider and federation protocol\n# specific\
\ WebSSO endpoint in keystone, otherwise it will use the value\n# as the protocol_id\
\ when redirecting to the WebSSO by protocol endpoint.\n# NOTE: The value is expected\
\ to be a tuple formatted as: (<idp_id>, <protocol_id>).\n#WEBSSO_IDP_MAPPING\
\ = {\n# \"acme_oidc\": (\"acme\", \"oidc\"),\n# \"acme_saml2\": (\"acme\"\
, \"saml2\"),\n#}\n\n# Disable SSL certificate checks (useful for self-signed\
\ certificates):\n#OPENSTACK_SSL_NO_VERIFY = True\n\n# The CA certificate to use\
\ to verify SSL connections\n#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'\n\n\
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the\n# capabilities\
\ of the auth backend for Keystone.\n# If Keystone has been configured to use\
\ LDAP as the auth backend then set\n# can_edit_user to False and name to 'ldap'.\n\
#\n# TODO(tres): Remove these once Keystone has an API to identify auth backend.\n\
OPENSTACK_KEYSTONE_BACKEND = {\n 'name': 'native',\n 'can_edit_user': True,\n\
\ 'can_edit_group': True,\n 'can_edit_project': True,\n 'can_edit_domain':\
\ True,\n 'can_edit_role': True,\n}\n\n# Setting this to True, will add a new\
\ \"Retrieve Password\" action on instance,\n# allowing Admin session password\
\ retrieval/decryption.\n#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False\n\n# The\
\ Launch Instance user experience has been significantly enhanced.\n# You can\
\ choose whether to enable the new launch instance experience,\n# the legacy experience,\
\ or both. The legacy experience will be removed\n# in a future release, but is\
\ available as a temporary backup setting to ensure\n# compatibility with existing\
\ deployments. Further development will not be\n# done on the legacy experience.\
\ Please report any problems with the new\n# experience via the Launchpad tracking\
\ system.\n#\n# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED\
\ to\n# determine the experience to enable. Set them both to true to enable\n\
# both.\n#LAUNCH_INSTANCE_LEGACY_ENABLED = True\n#LAUNCH_INSTANCE_NG_ENABLED =\
\ False\n\n# A dictionary of settings which can be used to provide the default\
\ values for\n# properties found in the Launch Instance modal.\n#LAUNCH_INSTANCE_DEFAULTS\
\ = {\n# 'config_drive': False,\n# 'enable_scheduler_hints': True\n#}\n\n\
# The Xen Hypervisor has the ability to set the mount point for volumes\n# attached\
\ to instances (other Hypervisors currently do not). Setting\n# can_set_mount_point\
\ to True will add the option to set the mount point\n# from the UI.\nOPENSTACK_HYPERVISOR_FEATURES\
\ = {\n 'can_set_mount_point': False,\n 'can_set_password': False,\n \
\ 'requires_keypair': False,\n 'enable_quotas': True\n}\n\n# The OPENSTACK_CINDER_FEATURES\
\ settings can be used to enable optional\n# services provided by cinder that\
\ is not exposed by its extension API.\nOPENSTACK_CINDER_FEATURES = {\n 'enable_backup':\
\ False,\n}\n\n# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable\
\ optional\n# services provided by neutron. Options currently available are load\n\
# balancer service, security groups, quotas, VPN service.\nOPENSTACK_NEUTRON_NETWORK\
\ = {\n 'enable_router': True,\n 'enable_quotas': True,\n 'enable_ipv6':\
\ True,\n 'enable_distributed_router': False,\n 'enable_ha_router': False,\n\
\ 'enable_lb': True,\n 'enable_firewall': True,\n 'enable_vpn': True,\n\
\ 'enable_fip_topology_check': True,\n\n # Default dns servers you would\
\ like to use when a subnet is\n # created. This is only a default, users\
\ can still choose a different\n # list of dns servers when creating a new\
\ subnet.\n # The entries below are examples only, and are not appropriate\
\ for\n # real deployments\n # 'default_dns_nameservers': [\"8.8.8.8\",\
\ \"8.8.4.4\", \"208.67.222.222\"],\n\n # The profile_support option is used\
\ to detect if an external router can be\n # configured via the dashboard.\
\ When using specific plugins the\n # profile_support can be turned on if needed.\n\
\ 'profile_support': None,\n #'profile_support': 'cisco',\n\n # Set which\
\ provider network types are supported. Only the network types\n # in this\
\ list will be available to choose from when creating a network.\n # Network\
\ types include local, flat, vlan, gre, vxlan and geneve.\n # 'supported_provider_types':\
\ ['*'],\n\n # You can configure available segmentation ID range per network\
\ type\n # in your deployment.\n # 'segmentation_id_range': {\n # \
\ 'vlan': [1024, 2048],\n # 'vxlan': [4094, 65536],\n # },\n\n #\
\ You can define additional provider network types here.\n # 'extra_provider_types':\
\ {\n # 'awesome_type': {\n # 'display_name': 'Awesome New Type',\n\
\ # 'require_physical_network': False,\n # 'require_segmentation_id':\
\ True,\n # }\n # },\n\n # Set which VNIC types are supported for\
\ port binding. Only the VNIC\n # types in this list will be available to choose\
\ from when creating a\n # port.\n # VNIC types include 'normal', 'macvtap'\
\ and 'direct'.\n # Set to empty list or None to disable VNIC type selection.\n\
\ 'supported_vnic_types': ['*'],\n}\n\n# The OPENSTACK_HEAT_STACK settings\
\ can be used to disable password\n# field required while launching the stack.\n\
OPENSTACK_HEAT_STACK = {\n 'enable_user_pass': True,\n}\n\n# The OPENSTACK_IMAGE_BACKEND\
\ settings can be used to customize features\n# in the OpenStack Dashboard related\
\ to the Image service, such as the list\n# of supported image formats.\n#OPENSTACK_IMAGE_BACKEND\
\ = {\n# 'image_formats': [\n# ('', _('Select format')),\n# ('aki',\
\ _('AKI - Amazon Kernel Image')),\n# ('ami', _('AMI - Amazon Machine Image')),\n\
# ('ari', _('ARI - Amazon Ramdisk Image')),\n# ('docker', _('Docker')),\n\
# ('iso', _('ISO - Optical Disk Image')),\n# ('ova', _('OVA - Open\
\ Virtual Appliance')),\n# ('qcow2', _('QCOW2 - QEMU Emulator')),\n# \
\ ('raw', _('Raw')),\n# ('vdi', _('VDI - Virtual Disk Image')),\n\
# ('vhd', _('VHD - Virtual Hard Disk')),\n# ('vmdk', _('VMDK - Virtual\
\ Machine Disk')),\n# ],\n#}\n\n# The IMAGE_CUSTOM_PROPERTY_TITLES settings\
\ is used to customize the titles for\n# image custom property attributes that\
\ appear on image detail pages.\nIMAGE_CUSTOM_PROPERTY_TITLES = {\n \"architecture\"\
: _(\"Architecture\"),\n \"kernel_id\": _(\"Kernel ID\"),\n \"ramdisk_id\"\
: _(\"Ramdisk ID\"),\n \"image_state\": _(\"Euca2ools state\"),\n \"project_id\"\
: _(\"Project ID\"),\n \"image_type\": _(\"Image Type\"),\n}\n\n# The IMAGE_RESERVED_CUSTOM_PROPERTIES\
\ setting is used to specify which image\n# custom properties should not be displayed\
\ in the Image Custom Properties\n# table.\nIMAGE_RESERVED_CUSTOM_PROPERTIES =\
\ []\n\n# Set to 'legacy' or 'direct' to allow users to upload images to glance\
\ via\n# Horizon server. When enabled, a file form field will appear on the create\n\
# image form. If set to 'off', there will be no file form field on the create\n\
# image form. See documentation for deployment considerations.\n#HORIZON_IMAGES_UPLOAD_MODE\
\ = 'legacy'\n\n# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for\
\ the endpoints\n# in the Keystone service catalog. Use this setting when Horizon\
\ is running\n# external to the OpenStack environment. The default is 'publicURL'.\n\
OPENSTACK_ENDPOINT_TYPE = \"internalURL\"\n\n# SECONDARY_ENDPOINT_TYPE specifies\
\ the fallback endpoint type to use in the\n# case that OPENSTACK_ENDPOINT_TYPE\
\ is not present in the endpoints\n# in the Keystone service catalog. Use this\
\ setting when Horizon is running\n# external to the OpenStack environment. The\
\ default is None. This\n# value should differ from OPENSTACK_ENDPOINT_TYPE if\
\ used.\n#SECONDARY_ENDPOINT_TYPE = None\n\n# The number of objects (Swift containers/objects\
\ or images) to display\n# on a single page before providing a paging element\
\ (a \"more\" link)\n# to paginate results.\nAPI_RESULT_LIMIT = 1000\nAPI_RESULT_PAGE_SIZE\
\ = 20\n\n# The size of chunk in bytes for downloading objects from Swift\nSWIFT_FILE_TRANSFER_CHUNK_SIZE\
\ = 512 * 1024\n\n# Specify a maximum number of items to display in a dropdown.\n\
DROPDOWN_MAX_ITEMS = 30\n\n# The timezone of the server. This should correspond\
\ with the timezone\n# of your entire OpenStack installation, and hopefully be\
\ in UTC.\nTIME_ZONE = \"UTC\"\n\n# When launching an instance, the menu of available\
\ flavors is\n# sorted by RAM usage, ascending. If you would like a different\
\ sort order,\n# you can provide another flavor attribute as sorting key. Alternatively,\
\ you\n# can provide a custom callback method to use for sorting. You can also\
\ provide\n# a flag for reverse sort. For more info, see\n# http://docs.python.org/2/library/functions.html#sorted\n\
#CREATE_INSTANCE_FLAVOR_SORT = {\n# 'key': 'name',\n# # or\n# 'key':\
\ my_awesome_callback_method,\n# 'reverse': False,\n#}\n\n# Set this to True\
\ to display an 'Admin Password' field on the Change Password\n# form to verify\
\ that it is indeed the admin logged-in who wants to change\n# the password.\n\
#ENFORCE_PASSWORD_CHECK = False\n\n# Modules that provide /auth routes that can\
\ be used to handle different types\n# of user authentication. Add auth plugins\
\ that require extra route handling to\n# this list.\n#AUTHENTICATION_URLS = [\n\
# 'openstack_auth.urls',\n#]\n\n# The Horizon Policy Enforcement engine uses\
\ these values to load per service\n# policy rule files. The content of these\
\ files should match the files the\n# OpenStack services are using to determine\
\ role based access control in the\n# target installation.\n\n# Path to directory\
\ containing policy.json files\nPOLICY_FILES_PATH = '/etc/openstack-dashboard'\n\
\n# Map of local copy of service policy files.\n# Please insure that your identity\
\ policy file matches the one being used on\n# your keystone servers. There is\
\ an alternate policy file that may be used\n# in the Keystone v3 multi-domain\
\ case, policy.v3cloudsample.json.\n# This file is not included in the Horizon\
\ repository by default but can be\n# found at\n# http://git.openstack.org/cgit/openstack/keystone/tree/etc/\
\ \\\n# policy.v3cloudsample.json\n# Having matching policy files on the Horizon\
\ and Keystone servers is essential\n# for normal operation. This holds true for\
\ all services and their policy files.\n#POLICY_FILES = {\n# 'identity': 'keystone_policy.json',\n\
# 'compute': 'nova_policy.json',\n# 'volume': 'cinder_policy.json',\n# \
\ 'image': 'glance_policy.json',\n# 'orchestration': 'heat_policy.json',\n\
# 'network': 'neutron_policy.json',\n# 'telemetry': 'ceilometer_policy.json',\n\
#}\n\n# TODO: (david-lyle) remove when plugins support adding settings.\n# Note:\
\ Only used when trove-dashboard plugin is configured to be used by\n# Horizon.\n\
# Trove user and database extension support. By default support for\n# creating\
\ users and databases on database instances is turned on.\n# To disable these\
\ extensions set the permission here to something\n# unusable such as [\"!\"].\n\
#TROVE_ADD_USER_PERMS = []\n#TROVE_ADD_DATABASE_PERMS = []\n\n# Change this patch\
\ to the appropriate list of tuples containing\n# a key, label and static directory\
\ containing two files:\n# _variables.scss and _styles.scss\n#AVAILABLE_THEMES\
\ = [\n# ('default', 'Default', 'themes/default'),\n# ('material', 'Material',\
\ 'themes/material'),\n#]\n\nLOGGING = {\n 'version': 1,\n # When set to\
\ True this will disable all logging except\n # for loggers specified in this\
\ configuration dictionary. Note that\n # if nothing is specified here and\
\ disable_existing_loggers is True,\n # django.db.backends will still log unless\
\ it is disabled explicitly.\n 'disable_existing_loggers': False,\n 'formatters':\
\ {\n 'operation': {\n # The format of \"%(message)s\" is defined\
\ by\n # OPERATION_LOG_OPTIONS['format']\n 'format': '%(asctime)s\
\ %(message)s'\n },\n },\n 'handlers': {\n 'null': {\n \
\ 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n \
\ },\n 'console': {\n # Set the level to \"DEBUG\" for verbose\
\ output logging.\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n\
\ },\n 'operation': {\n 'level': 'INFO',\n \
\ 'class': 'logging.StreamHandler',\n 'formatter': 'operation',\n\
\ },\n },\n 'loggers': {\n # Logging from django.db.backends\
\ is VERY verbose, send to null\n # by default.\n 'django.db.backends':\
\ {\n 'handlers': ['null'],\n 'propagate': False,\n \
\ },\n 'requests': {\n 'handlers': ['null'],\n \
\ 'propagate': False,\n },\n 'horizon': {\n 'handlers':\
\ ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n\
\ },\n 'horizon.operation_log': {\n 'handlers': ['operation'],\n\
\ 'level': 'INFO',\n 'propagate': False,\n },\n \
\ 'openstack_dashboard': {\n 'handlers': ['console'],\n \
\ 'level': 'DEBUG',\n 'propagate': False,\n },\n \
\ 'novaclient': {\n 'handlers': ['console'],\n 'level':\
\ 'DEBUG',\n 'propagate': False,\n },\n 'cinderclient':\
\ {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n \
\ 'propagate': False,\n },\n 'keystoneclient': {\n \
\ 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate':\
\ False,\n },\n 'glanceclient': {\n 'handlers': ['console'],\n\
\ 'level': 'DEBUG',\n 'propagate': False,\n },\n\
\ 'neutronclient': {\n 'handlers': ['console'],\n \
\ 'level': 'DEBUG',\n 'propagate': False,\n },\n 'heatclient':\
\ {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n \
\ 'propagate': False,\n },\n 'ceilometerclient': {\n \
\ 'handlers': ['console'],\n 'level': 'DEBUG',\n \
\ 'propagate': False,\n },\n 'swiftclient': {\n 'handlers':\
\ ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n\
\ },\n 'openstack_auth': {\n 'handlers': ['console'],\n\
\ 'level': 'DEBUG',\n 'propagate': False,\n },\n\
\ 'nose.plugins.manager': {\n 'handlers': ['console'],\n \
\ 'level': 'DEBUG',\n 'propagate': False,\n },\n \
\ 'django': {\n 'handlers': ['console'],\n 'level':\
\ 'DEBUG',\n 'propagate': False,\n },\n 'iso8601': {\n\
\ 'handlers': ['null'],\n 'propagate': False,\n },\n\
\ 'scss': {\n 'handlers': ['null'],\n 'propagate':\
\ False,\n },\n },\n}\n\n# 'direction' should not be specified for all_tcp/udp/icmp.\n\
# It is specified in the form.\nSECURITY_GROUP_RULES = {\n 'all_tcp': {\n \
\ 'name': _('All TCP'),\n 'ip_protocol': 'tcp',\n 'from_port':\
\ '1',\n 'to_port': '65535',\n },\n 'all_udp': {\n 'name':\
\ _('All UDP'),\n 'ip_protocol': 'udp',\n 'from_port': '1',\n \
\ 'to_port': '65535',\n },\n 'all_icmp': {\n 'name': _('All\
\ ICMP'),\n 'ip_protocol': 'icmp',\n 'from_port': '-1',\n \
\ 'to_port': '-1',\n },\n 'ssh': {\n 'name': 'SSH',\n 'ip_protocol':\
\ 'tcp',\n 'from_port': '22',\n 'to_port': '22',\n },\n 'smtp':\
\ {\n 'name': 'SMTP',\n 'ip_protocol': 'tcp',\n 'from_port':\
\ '25',\n 'to_port': '25',\n },\n 'dns': {\n 'name': 'DNS',\n\
\ 'ip_protocol': 'tcp',\n 'from_port': '53',\n 'to_port':\
\ '53',\n },\n 'http': {\n 'name': 'HTTP',\n 'ip_protocol':\
\ 'tcp',\n 'from_port': '80',\n 'to_port': '80',\n },\n 'pop3':\
\ {\n 'name': 'POP3',\n 'ip_protocol': 'tcp',\n 'from_port':\
\ '110',\n 'to_port': '110',\n },\n 'imap': {\n 'name': 'IMAP',\n\
\ 'ip_protocol': 'tcp',\n 'from_port': '143',\n 'to_port':\
\ '143',\n },\n 'ldap': {\n 'name': 'LDAP',\n 'ip_protocol':\
\ 'tcp',\n 'from_port': '389',\n 'to_port': '389',\n },\n \
\ 'https': {\n 'name': 'HTTPS',\n 'ip_protocol': 'tcp',\n \
\ 'from_port': '443',\n 'to_port': '443',\n },\n 'smtps': {\n \
\ 'name': 'SMTPS',\n 'ip_protocol': 'tcp',\n 'from_port': '465',\n\
\ 'to_port': '465',\n },\n 'imaps': {\n 'name': 'IMAPS',\n\
\ 'ip_protocol': 'tcp',\n 'from_port': '993',\n 'to_port':\
\ '993',\n },\n 'pop3s': {\n 'name': 'POP3S',\n 'ip_protocol':\
\ 'tcp',\n 'from_port': '995',\n 'to_port': '995',\n },\n \
\ 'ms_sql': {\n 'name': 'MS SQL',\n 'ip_protocol': 'tcp',\n \
\ 'from_port': '1433',\n 'to_port': '1433',\n },\n 'mysql': {\n\
\ 'name': 'MYSQL',\n 'ip_protocol': 'tcp',\n 'from_port':\
\ '3306',\n 'to_port': '3306',\n },\n 'rdp': {\n 'name': 'RDP',\n\
\ 'ip_protocol': 'tcp',\n 'from_port': '3389',\n 'to_port':\
\ '3389',\n },\n}\n\n# Deprecation Notice:\n#\n# The setting FLAVOR_EXTRA_KEYS\
\ has been deprecated.\n# Please load extra spec metadata into the Glance Metadata\
\ Definition Catalog.\n#\n# The sample quota definitions can be found in:\n# <glance_source>/etc/metadefs/compute-quota.json\n\
#\n# The metadata definition catalog supports CLI and API:\n# $glance --os-image-api-version\
\ 2 help md-namespace-import\n# $glance-manage db_load_metadefs <directory_with_definition_files>\n\
#\n# See Metadata Definitions on: http://docs.openstack.org/developer/glance/\n\
\n# TODO: (david-lyle) remove when plugins support settings natively\n# Note:\
\ This is only used when the Sahara plugin is configured and enabled\n# for use\
\ in Horizon.\n# Indicate to the Sahara data processing service whether or not\n\
# automatic floating IP allocation is in effect. If it is not\n# in effect, the\
\ user will be prompted to choose a floating IP\n# pool for use in their cluster.\
\ False by default. You would want\n# to set this to True if you were running\
\ Nova Networking with\n# auto_assign_floating_ip = True.\n#SAHARA_AUTO_IP_ALLOCATION_ENABLED\
\ = False\n\n# The hash algorithm to use for authentication tokens. This must\n\
# match the hash algorithm that the identity server and the\n# auth_token middleware\
\ are using. Allowed values are the\n# algorithms supported by Python's hashlib\
\ library.\n#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'\n\n# AngularJS requires some\
\ settings to be made available to\n# the client side. Some settings are required\
\ by in-tree / built-in horizon\n# features. These settings must be added to REST_API_REQUIRED_SETTINGS\
\ in the\n# form of ['SETTING_1','SETTING_2'], etc.\n#\n# You may remove settings\
\ from this list for security purposes, but do so at\n# the risk of breaking a\
\ built-in horizon feature. These settings are required\n# for horizon to function\
\ properly. Only remove them if you know what you\n# are doing. These settings\
\ may in the future be moved to be defined within\n# the enabled panel configuration.\n\
# You should not add settings to this list for out of tree extensions.\n# See:\
\ https://wiki.openstack.org/wiki/Horizon/RESTAPI\nREST_API_REQUIRED_SETTINGS\
\ = ['OPENSTACK_HYPERVISOR_FEATURES',\n 'LAUNCH_INSTANCE_DEFAULTS',\n\
\ 'OPENSTACK_IMAGE_FORMATS']\n\n# Additional settings\
\ can be made available to the client side for\n# extensibility by specifying\
\ them in REST_API_ADDITIONAL_SETTINGS\n# !! Please use extreme caution as the\
\ settings are transferred via HTTP/S\n# and are not encrypted on the browser.\
\ This is an experimental API and\n# may be deprecated in the future without notice.\n\
#REST_API_ADDITIONAL_SETTINGS = []\n\n# DISALLOW_IFRAME_EMBED can be used to prevent\
\ Horizon from being embedded\n# within an iframe. Legacy browsers are still vulnerable\
\ to a Cross-Frame\n# Scripting (XFS) vulnerability, so this option allows extra\
\ security hardening\n# where iframes are not used in deployment. Default setting\
\ is True.\n# For more information see:\n# http://tinyurl.com/anticlickjack\n\
#DISALLOW_IFRAME_EMBED = True\n\n# Help URL can be made available for the client.\
\ To provide a help URL, edit the\n# following attribute to the URL of your choice.\n\
#HORIZON_CONFIG[\"help_url\"] = \"http://openstack.mycompany.org\"\n\n# Settings\
\ for OperationLogMiddleware\n# OPERATION_LOG_ENABLED is flag to use the function\
\ to log an operation on\n# Horizon.\n# mask_targets is arrangement for appointing\
\ a target to mask.\n# method_targets is arrangement of HTTP method to output\
\ log.\n# format is the log contents.\n#OPERATION_LOG_ENABLED = False\n#OPERATION_LOG_OPTIONS\
\ = {\n# 'mask_fields': ['password'],\n# 'target_methods': ['POST'],\n#\
\ 'format': (\"[%(domain_name)s] [%(domain_id)s] [%(project_name)s]\"\n# \
\ \" [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]\"\
\n# \" [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]\"\
\n# \" [%(http_status)s] [%(param)s]\"),\n#}\n\n# The default date range\
\ in the Overview panel meters - either <today> minus N\n# days (if the value\
\ is integer N), or from the beginning of the current month\n# until today (if\
\ set to None). This setting should be used to limit the amount\n# of data fetched\
\ by default when rendering the Overview panel.\n#OVERVIEW_DAYS_RANGE = 1\n\n\
# To allow operators to require admin users provide a search criteria first\n\
# before loading any data into the admin views, set the following attribute to\n\
# True\n#ADMIN_FILTER_DATA_FIRST=False\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: horizon, namespace: kolla}
configmap "horizon" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/rabbitmq-server\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/rabbitmq-env.conf\"\
,\n \"dest\": \"/etc/rabbitmq/rabbitmq-env.conf\",\n \"\
owner\": \"rabbitmq\",\n \"perm\": \"0600\"\n },\n {\n\
\ \"source\": \"/var/lib/kolla/config_files/rabbitmq.config\",\n \
\ \"dest\": \"/etc/rabbitmq/rabbitmq.config\",\n \"owner\"\
: \"rabbitmq\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/rabbitmq-clusterer.config\"\
,\n \"dest\": \"/etc/rabbitmq/rabbitmq-clusterer.config\",\n \
\ \"owner\": \"rabbitmq\",\n \"perm\": \"0600\"\n },\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/definitions.json\"\
,\n \"dest\": \"/etc/rabbitmq/definitions.json\",\n \"owner\"\
: \"rabbitmq\",\n \"perm\": \"0600\"\n }\n ]\n}\n", definitions.json: "{\n\
\ \"vhosts\": [\n {\"name\": \"/\"}\n ],\n \"users\": [\n {\"name\":\
\ \"openstack\", \"password\": \"tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o\", \"\
tags\": \"administrator\"}\n ],\n \"permissions\": [\n {\"user\": \"openstack\"\
, \"vhost\": \"/\", \"configure\": \".*\", \"write\": \".*\", \"read\": \".*\"\
}\n ],\n \"policies\":[\n {\"vhost\": \"/\", \"name\": \"ha-all\", \"pattern\"\
: \".*\", \"apply-to\": \"all\", \"definition\": {\"ha-mode\":\"all\"}, \"priority\"\
:0}\n ]\n}\n", rabbitmq-clusterer.config: "[\n {version, 1},\n {nodes, [\n\
\ {'rabbit@toolbox', disc} ]},\n {gospel,\n {node, 'rabbit@toolbox'}}\n\
].\n", rabbitmq-env.conf: 'RABBITMQ_NODENAME=rabbit
RABBITMQ_LOG_BASE=/var/log/kolla/rabbitmq
# TODO(sdake, vhosakot)
# erlang by default binds to wildcard (all interfaces) and can potentially
# interfere with the neutron external or tenant networks. We should in theory
# bind epmd to the host''s IPv4 address to address the issue however this also
# has issues and can crash erlang when it is compiled with IPv6 support.
# See bugs:
# https://bugs.launchpad.net/ubuntu/+source/erlang/+bug/1374109
# https://bugs.launchpad.net/kolla/+bug/1562701
# https://bugzilla.redhat.com/show_bug.cgi?id=1324922
#export ERL_EPMD_ADDRESS=0.0.0.0
export ERL_EPMD_PORT=4369
', rabbitmq.config: "[\n {kernel, [\n {inet_dist_use_interface, {0,0,0,0}},\n\
\ {inet_dist_listen_min, 25672},\n {inet_dist_listen_max, 25672}\n ]},\n\
\ {rabbit, [\n {tcp_listeners, [\n {\"0.0.0.0\", 5672}\n ]} ]},\n\
\ {rabbitmq_management, [\n {listener, [\n {ip, \"0.0.0.0\"},\n \
\ {port, 15672}\n ]},\n {load_definitions, \"/etc/rabbitmq/definitions.json\"\
}\n ]}].\n% EOF\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: rabbitmq, namespace: kolla}
configmap "rabbitmq" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/bin/memcached -vv -l 0.0.0.0 -p 11211\"\
,\n \"config_files\": []\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: memcached, namespace: kolla}
configmap "memcached" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-api\",\n \"config_files\": [\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n }\n ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-api, namespace: kolla}
configmap "nova-api" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-conductor\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n }\n ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-conductor, namespace: kolla}
configmap "nova-conductor" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-scheduler\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n }\n ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-scheduler, namespace: kolla}
configmap "nova-scheduler" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:9292\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: glance-api-haproxy, namespace: kolla}
configmap "glance-api-haproxy" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:9191\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: glance-registry-haproxy, namespace: kolla}
configmap "glance-registry-haproxy" created
apiVersion: v1
data: {config.json: "{\n \"config_files\": [\n {\n \"dest\":\
\ \"/etc/glance/glance-api.conf\", \n \"source\": \"/var/lib/kolla/config_files/glance-api.conf\"\
, \n \"perm\": \"0600\", \n \"owner\": \"glance\"\n \
\ }, \n {\n \"dest\": \"/etc/ceph/ceph.client.glance.keyring\"\
, \n \"source\": \"/var/lib/kolla/config_files/ceph.client.glance.keyring\"\
, \n \"owner\": \"glance\", \n \"perm\": \"0700\"\n \
\ }, \n {\n \"dest\": \"/etc/ceph/ceph.conf\", \n \
\ \"source\": \"/var/lib/kolla/config_files/ceph.conf\", \n \"\
owner\": \"glance\", \n \"perm\": \"0700\"\n }\n ], \n \
\ \"command\": \"glance-api\"\n}", glance-api.conf: '[DEFAULT]
debug = True
log_file = /var/log/kolla/glance/api.log
use_forwarded_for = true
bind_host = 0.0.0.0
bind_port = 9292
workers = 1
registry_host = glance-registry
show_image_direct_url = True
show_multiple_locations = True
cinder_catalog_info = volume:cinder:internalURL
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[database]
connection = mysql+pymysql://glance:0dR0ixKdKtVkkb13CRFN1QAz5Q8wBU5tLVdRVqip@mariadb/glance
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = 3W1ctxRUXGu965JHuk7Uyz2KUCzi2zb3cJ6dMMkc
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[paste_deploy]
flavor = keystone
[glance_store]
default_store = rbd
stores = rbd
rbd_store_user = glance
rbd_store_pool = images
rbd_store_chunk_size = 8
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: glance-api, namespace: kolla}
configmap "glance-api" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"glance-registry\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/glance-registry.conf\"\
,\n \"dest\": \"/etc/glance/glance-registry.conf\",\n \"\
owner\": \"glance\",\n \"perm\": \"0600\"\n }\n ]\n}\n",
glance-registry.conf: '[DEFAULT]
debug = True
log_file = /var/log/kolla/glance/registry.log
bind_host = 0.0.0.0
bind_port = 9191
workers = 1
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[database]
connection = mysql+pymysql://glance:0dR0ixKdKtVkkb13CRFN1QAz5Q8wBU5tLVdRVqip@mariadb/glance
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = 3W1ctxRUXGu965JHuk7Uyz2KUCzi2zb3cJ6dMMkc
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[paste_deploy]
flavor = keystone
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: glance-registry, namespace: kolla}
configmap "glance-registry" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"neutron-server --config-file /etc/neutron/neutron.conf\
\ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/neutron_lbaas.conf\
\ --config-file /etc/neutron/neutron_vpnaas.conf\",\n \"config_files\": [\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/neutron.conf\"\
,\n \"dest\": \"/etc/neutron/neutron.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/neutron_lbaas.conf\",\n \
\ \"dest\": \"/etc/neutron/neutron_lbaas.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/neutron_vpnaas.conf\",\n \
\ \"dest\": \"/etc/neutron/neutron_vpnaas.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ml2_conf.ini\",\n \
\ \"dest\": \"/etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n }\n ]\n}\n", ml2-conf.ini: "[ml2]\n\
type_drivers = flat,vlan,vxlan\ntenant_network_types = vxlan\nmechanism_drivers\
\ = openvswitch,l2population\n\n[ml2_type_vlan]\nnetwork_vlan_ranges = \n\n[ml2_type_flat]\n\
flat_networks = physnet1\n\n[ml2_type_vxlan]\nvni_ranges = 1:1000\nvxlan_group\
\ = 239.1.1.1\n\n[securitygroup]\nfirewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n\
\n[agent]\ntunnel_types = vxlan\nl2_population = true\narp_responder = true\n\n\
[ovs]\nbridge_mappings = physnet1:br-ex\novsdb_connection = tcp:0.0.0.0:6640\n\
local_ip = 0.0.0.0\n\n", neutron-lbaas.conf: '', neutron-vpnaas.conf: '', neutron.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/neutron
use_stderr = False
bind_host = 0.0.0.0
bind_port = 9696
api_paste_config = /usr/share/neutron/api-paste.ini
endpoint_type = internalURL
api_workers = 1
metadata_works = 1
metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
allow_overlapping_ips = true
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[nova]
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
endpoint_type = internal
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection = mysql+pymysql://neutron:bz8Ei51omtAwnXHtTsFPpWD37ONQW2DyfS5NXt6w@mariadb/neutron
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: neutron-server, namespace: kolla}
configmap "neutron-server" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"neutron-dhcp-agent --config-file /etc/neutron/neutron.conf\
\ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/dhcp_agent.ini\"\
,\n \"config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/neutron.conf\"\
,\n \"dest\": \"/etc/neutron/neutron.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ml2_conf.ini\",\n \
\ \"dest\": \"/etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/dhcp_agent.ini\",\n \
\ \"dest\": \"/etc/neutron/dhcp_agent.ini\",\n \"owner\": \"neutron\"\
,\n \"perm\": \"0600\"\n },\n {\n \"source\"\
: \"/var/lib/kolla/config_files/dnsmasq.conf\",\n \"dest\": \"/etc/neutron/dnsmasq.conf\"\
,\n \"owner\": \"neutron\",\n \"perm\": \"0600\"\n \
\ }\n ]\n}\n", dhcp-agent.ini: '[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
enable_isolated_metadata = true
force_metadata = true
', dnsmasq.conf: 'log-facility=/var/log/kolla/neutron/dnsmasq.log
', ml2-conf.ini: "[ml2]\ntype_drivers = flat,vlan,vxlan\ntenant_network_types\
\ = vxlan\nmechanism_drivers = openvswitch,l2population\n\n[ml2_type_vlan]\nnetwork_vlan_ranges\
\ = \n\n[ml2_type_flat]\nflat_networks = physnet1\n\n[ml2_type_vxlan]\nvni_ranges\
\ = 1:1000\nvxlan_group = 239.1.1.1\n\n[securitygroup]\nfirewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n\
\n[agent]\ntunnel_types = vxlan\nl2_population = true\narp_responder = true\n\n\
[ovs]\nbridge_mappings = physnet1:br-ex\novsdb_connection = tcp:0.0.0.0:6640\n\
local_ip = 0.0.0.0\n\n", neutron.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/neutron
use_stderr = False
bind_host = 0.0.0.0
bind_port = 9696
api_paste_config = /usr/share/neutron/api-paste.ini
endpoint_type = internalURL
api_workers = 1
metadata_works = 1
metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
allow_overlapping_ips = true
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[nova]
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
endpoint_type = internal
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection = mysql+pymysql://neutron:bz8Ei51omtAwnXHtTsFPpWD37ONQW2DyfS5NXt6w@mariadb/neutron
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: neutron-dhcp-agent, namespace: kolla}
configmap "neutron-dhcp-agent" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"neutron-l3-agent --config-file /etc/neutron/neutron.conf\
\ --config-file /etc/neutron/l3_agent.ini --config-file /etc/neutron/fwaas_driver.ini\
\ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/neutron.conf\"\
,\n \"dest\": \"/etc/neutron/neutron.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ml2_conf.ini\",\n \
\ \"dest\": \"/etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/fwaas_driver.ini\",\n \
\ \"dest\": \"/etc/neutron/fwaas_driver.ini\",\n \"owner\": \"\
neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/l3_agent.ini\",\n \"dest\"\
: \"/etc/neutron/l3_agent.ini\",\n \"owner\": \"neutron\",\n \
\ \"perm\": \"0600\"\n }\n ]\n}\n", fwaas-driver.ini: '[fwaas]
', l3-agent.ini: "[DEFAULT]\nagent_mode = legacy\nexternal_network_bridge = \n\
\n", ml2-conf.ini: "[ml2]\ntype_drivers = flat,vlan,vxlan\ntenant_network_types\
\ = vxlan\nmechanism_drivers = openvswitch,l2population\n\n[ml2_type_vlan]\nnetwork_vlan_ranges\
\ = \n\n[ml2_type_flat]\nflat_networks = physnet1\n\n[ml2_type_vxlan]\nvni_ranges\
\ = 1:1000\nvxlan_group = 239.1.1.1\n\n[securitygroup]\nfirewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n\
\n[agent]\ntunnel_types = vxlan\nl2_population = true\narp_responder = true\n\n\
[ovs]\nbridge_mappings = physnet1:br-ex\novsdb_connection = tcp:0.0.0.0:6640\n\
local_ip = 0.0.0.0\n\n", neutron.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/neutron
use_stderr = False
bind_host = 0.0.0.0
bind_port = 9696
api_paste_config = /usr/share/neutron/api-paste.ini
endpoint_type = internalURL
api_workers = 1
metadata_works = 1
metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
allow_overlapping_ips = true
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[nova]
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
endpoint_type = internal
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection = mysql+pymysql://neutron:bz8Ei51omtAwnXHtTsFPpWD37ONQW2DyfS5NXt6w@mariadb/neutron
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: neutron-l3-agent, namespace: kolla}
configmap "neutron-l3-agent" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"neutron-metadata-agent --config-file /etc/neutron/neutron.conf\
\ --config-file /etc/neutron/metadata_agent.ini\",\n \"config_files\": [\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/neutron.conf\"\
,\n \"dest\": \"/etc/neutron/neutron.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ml2_conf.ini\",\n \
\ \"dest\": \"/etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/metadata_agent.ini\",\n \
\ \"dest\": \"/etc/neutron/metadata_agent.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n }\n ]\n}\n", metadata-agent.ini: '[DEFAULT]
nova_metadata_ip = nova-metadata
nova_metadata_port = 8775
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
', ml2-conf.ini: "[ml2]\ntype_drivers = flat,vlan,vxlan\ntenant_network_types\
\ = vxlan\nmechanism_drivers = openvswitch,l2population\n\n[ml2_type_vlan]\nnetwork_vlan_ranges\
\ = \n\n[ml2_type_flat]\nflat_networks = physnet1\n\n[ml2_type_vxlan]\nvni_ranges\
\ = 1:1000\nvxlan_group = 239.1.1.1\n\n[securitygroup]\nfirewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n\
\n[agent]\ntunnel_types = vxlan\nl2_population = true\narp_responder = true\n\n\
[ovs]\nbridge_mappings = physnet1:br-ex\novsdb_connection = tcp:0.0.0.0:6640\n\
local_ip = 0.0.0.0\n\n", neutron.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/neutron
use_stderr = False
bind_host = 0.0.0.0
bind_port = 9696
api_paste_config = /usr/share/neutron/api-paste.ini
endpoint_type = internalURL
api_workers = 1
metadata_works = 1
metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
allow_overlapping_ips = true
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[nova]
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
endpoint_type = internal
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection = mysql+pymysql://neutron:bz8Ei51omtAwnXHtTsFPpWD37ONQW2DyfS5NXt6w@mariadb/neutron
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: neutron-metadata-agent, namespace: kolla}
configmap "neutron-metadata-agent" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"neutron-openvswitch-agent --config-file\
\ /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini\"\
,\n \"config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/neutron.conf\"\
,\n \"dest\": \"/etc/neutron/neutron.conf\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ml2_conf.ini\",\n \
\ \"dest\": \"/etc/neutron/plugins/ml2/ml2_conf.ini\",\n \"owner\"\
: \"neutron\",\n \"perm\": \"0600\"\n }\n ]\n}\n", ml2-conf.ini: "[ml2]\n\
type_drivers = flat,vlan,vxlan\ntenant_network_types = vxlan\nmechanism_drivers\
\ = openvswitch,l2population\n\n[ml2_type_vlan]\nnetwork_vlan_ranges = \n\n[ml2_type_flat]\n\
flat_networks = physnet1\n\n[ml2_type_vxlan]\nvni_ranges = 1:1000\nvxlan_group\
\ = 239.1.1.1\n\n[securitygroup]\nfirewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver\n\
\n[agent]\ntunnel_types = vxlan\nl2_population = true\narp_responder = true\n\n\
[ovs]\nbridge_mappings = physnet1:br-ex\novsdb_connection = tcp:0.0.0.0:6640\n\
local_ip = 0.0.0.0\n\n", neutron.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/neutron
use_stderr = False
bind_host = 0.0.0.0
bind_port = 9696
api_paste_config = /usr/share/neutron/api-paste.ini
endpoint_type = internalURL
api_workers = 1
metadata_works = 1
metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
allow_overlapping_ips = true
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[nova]
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
endpoint_type = internal
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
connection = mysql+pymysql://neutron:bz8Ei51omtAwnXHtTsFPpWD37ONQW2DyfS5NXt6w@mariadb/neutron
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcache_servers = memcached
[oslo_messaging_notifications]
driver = noop
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: neutron-openvswitch-agent, namespace: kolla}
configmap "neutron-openvswitch-agent" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"start-ovsdb-server 0.0.0.0 br-ex eth1\
\ \",\n \"config_files\": []\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: openvswitch-db-server, namespace: kolla}
configmap "openvswitch-db-server" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock\
\ -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log\"\
,\n \"config_files\": []\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: openvswitch-vswitchd, namespace: kolla}
configmap "openvswitch-vswitchd" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/libvirtd --listen\",\n \"\
config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/libvirtd.conf\"\
,\n \"dest\": \"/etc/libvirt/libvirtd.conf\",\n \"owner\"\
: \"root\",\n \"perm\": \"0644\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/qemu.conf\",\n \"dest\"\
: \"/etc/libvirt/qemu.conf\",\n \"owner\": \"root\",\n \"\
perm\": \"0644\"\n } ]\n}\n", libvirtd.conf: 'listen_tcp = 1
auth_tcp = "none"
ca_file = ""
log_level = 3
log_outputs = "3:file:/var/log/kolla/libvirt/libvirtd.log"
listen_addr = "0.0.0.0"
', qemu.conf: 'stdio_handler = "file"
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-libvirt, namespace: kolla}
configmap "nova-libvirt" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-compute\",\n \"config_files\":\
\ [\n {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n } ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
virt_type = qemu
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-compute, namespace: kolla}
configmap "nova-compute" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-consoleauth\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n }\n ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-consoleauth, namespace: kolla}
configmap "nova-consoleauth" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"nova-novncproxy\",\n \"config_files\"\
: [\n {\n \"source\": \"/var/lib/kolla/config_files/nova.conf\"\
,\n \"dest\": \"/etc/nova/nova.conf\",\n \"owner\": \"nova\"\
,\n \"perm\": \"0600\"\n }\n ]\n}\n", nova.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/nova
use_forwarded_for = true
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
osapi_compute_listen = 0.0.0.0
osapi_compute_listen_port = 8774
osapi_compute_workers = 1
metadata_workers = 1
metadata_listen = 0.0.0.0
metadata_listen_port = 8775
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
my_ip = 0.0.0.0
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[conductor]
workers = 1
[vnc]
novncproxy_host = 0.0.0.0
novncproxy_port = 6080
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 0.0.0.0
novncproxy_base_url = http://kolla_kubernetes_external_vip:6080/vnc_auto.html
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = http://glance-api:9292
num_retries = 1
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://neutron-server:9696
metadata_proxy_shared_secret = 5He8wFTGKrBoTq6cdrMCGlxuCQhqjiLUxIn7dFft
service_metadata_proxy = true
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = r2tJPOPQF4K6eYPeHY0nQpv832DfjsxdI6WhVWFr
[database]
connection = mysql+pymysql://nova:Lz7rRQqN3h7WM37ObvNr6NbjOEz2OjifHXyvyO3e@mariadb/nova
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://nova_api:JN4eR2xhKHBOiT6YMZ8Uc8fe8vL4YhfOMQXt2Q5S@mariadb/nova_api
max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = memcached:11211
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = SCahagB7meWSTRjwAsPAt3vWOCvp4qNr0rPOhqMr
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[libvirt]
connection_uri = "qemu+tcp://0.0.0.0/system"
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
driver = noop
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
/etc/nova/nova.conf
'}
kind: ConfigMap
metadata: {creationTimestamp: null, name: nova-novncproxy, namespace: kolla}
configmap "nova-novncproxy" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:6080\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: nova-novncproxy-haproxy, namespace: kolla}
configmap "nova-novncproxy-haproxy" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:9696\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: neutron-server-haproxy, namespace: kolla}
configmap "neutron-server-haproxy" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:8774\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n\n\nlisten metadata\n bind 0.0.0.0:8775\n \
\ server local-meta 127.0.0.1:8081 check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: nova-api-haproxy, namespace: kolla}
configmap "nova-api-haproxy" created
apiVersion: v1
data: {cinder.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
use_stderr = False
enable_v1_api = false
osapi_volume_workers = 1
volume_name_template = volume-%s
glance_api_servers = http://glance-api:9292
glance_num_retries = 1
glance_api_version = 2
os_region_name = RegionOne
enabled_backends = rbd-1
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = 8776
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[oslo_messaging_notifications]
[database]
connection = mysql+pymysql://cinder:pPZTsFszvXuoBW0ypsYyFLOoAFVC05jRTZ8SWz26@mariadb/cinder
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 8rojnYV9r000nTH8TCY9CMghu2m2YtYiyw8XBx75
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = f68fec1b-1831-4fd8-99d1-5487c4609c5b
report_discard_supported = True
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper
--config-file /etc/cinder/cinder.conf
', config.json: "{\n \"command\": \"cinder-api --config-file /etc/cinder/cinder.conf\"\
,\n \"config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/cinder.conf\"\
,\n \"dest\": \"/etc/cinder/cinder.conf\",\n \"owner\":\
\ \"cinder\",\n \"perm\": \"0600\"\n }\n ]\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: cinder-api, namespace: kolla}
configmap "cinder-api" created
apiVersion: v1
data: {config.json: "{\n \"command\": \"/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg\
\ -p /run/haproxy.pid\",\n \"config_files\": [{\n \"source\": \"/var/lib/kolla/config_files/haproxy.cfg\"\
,\n \"dest\": \"/etc/haproxy/haproxy.cfg\",\n \"owner\": \"root\"\
,\n \"perm\": \"0644\"\n }]\n}\n", haproxy.cfg: "global\n chroot /var/lib/haproxy\n\
\ user haproxy\n group haproxy\n daemon\n log /var/lib/kolla/heka/log local0\n\
\ maxconn 4000\n # commented out for now. Doesn't work on haproxy from kolla\
\ 2.x\n # its unused anyway.\n #stats socket /var/lib/kolla/haproxy/haproxy.sock\n\
\ndefaults\n log global\n mode http\n option redispatch\n option httplog\n\
\ option forwardfor\n retries 3\n timeout http-request 10s\n timeout queue\
\ 1m\n timeout connect 10s\n timeout client 1m\n timeout server 1m\n timeout\
\ check 10s\n\nlisten api\n bind 0.0.0.0:8776\n server local-api 127.0.0.1:8080\
\ check inter 2000 rise 2 fall 5\n"}
kind: ConfigMap
metadata: {name: cinder-api-haproxy, namespace: kolla}
configmap "cinder-api-haproxy" created
apiVersion: v1
data: {cinder.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
use_stderr = False
enable_v1_api = false
osapi_volume_workers = 1
volume_name_template = volume-%s
glance_api_servers = http://glance-api:9292
glance_num_retries = 1
glance_api_version = 2
os_region_name = RegionOne
enabled_backends = rbd-1
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = 8776
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[oslo_messaging_notifications]
[database]
connection = mysql+pymysql://cinder:pPZTsFszvXuoBW0ypsYyFLOoAFVC05jRTZ8SWz26@mariadb/cinder
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 8rojnYV9r000nTH8TCY9CMghu2m2YtYiyw8XBx75
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = f68fec1b-1831-4fd8-99d1-5487c4609c5b
report_discard_supported = True
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper
--config-file /etc/cinder/cinder.conf
', config.json: "{\n \"command\": \"cinder-backup --config-file /etc/cinder/cinder.conf\"\
,\n \"config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/cinder.conf\"\
,\n \"dest\": \"/etc/cinder/cinder.conf\",\n \"owner\":\
\ \"cinder\",\n \"perm\": \"0600\"\n },\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ceph.*\",\n \"dest\"\
: \"/etc/ceph/\",\n \"owner\": \"cinder\",\n \"perm\": \"\
0700\",\n \"optional\": false\n } ]\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: cinder-backup, namespace: kolla}
configmap "cinder-backup" created
apiVersion: v1
data: {cinder.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
use_stderr = False
enable_v1_api = false
osapi_volume_workers = 1
volume_name_template = volume-%s
glance_api_servers = http://glance-api:9292
glance_num_retries = 1
glance_api_version = 2
os_region_name = RegionOne
enabled_backends = rbd-1
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = 8776
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[oslo_messaging_notifications]
[database]
connection = mysql+pymysql://cinder:pPZTsFszvXuoBW0ypsYyFLOoAFVC05jRTZ8SWz26@mariadb/cinder
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 8rojnYV9r000nTH8TCY9CMghu2m2YtYiyw8XBx75
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = f68fec1b-1831-4fd8-99d1-5487c4609c5b
report_discard_supported = True
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper
--config-file /etc/cinder/cinder.conf
', config.json: "{\n \"command\": \"cinder-scheduler --config-file /etc/cinder/cinder.conf\"\
,\n \"config_files\": [\n {\n \"source\": \"/var/lib/kolla/config_files/cinder.conf\"\
,\n \"dest\": \"/etc/cinder/cinder.conf\",\n \"owner\":\
\ \"cinder\",\n \"perm\": \"0600\"\n }\n ]\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: cinder-scheduler, namespace: kolla}
configmap "cinder-scheduler" created
apiVersion: v1
data: {cinder.conf: '[DEFAULT]
debug = True
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
use_stderr = False
enable_v1_api = false
osapi_volume_workers = 1
volume_name_template = volume-%s
glance_api_servers = http://glance-api:9292
glance_num_retries = 1
glance_api_version = 2
os_region_name = RegionOne
enabled_backends = rbd-1
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = 8776
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
transport_url = rabbit://openstack:tnNvs4ySwVo4bbPunWf3o9V5S6Z40KtMySdSYk5o@rabbitmq:5672
[oslo_messaging_notifications]
[database]
connection = mysql+pymysql://cinder:pPZTsFszvXuoBW0ypsYyFLOoAFVC05jRTZ8SWz26@mariadb/cinder
max_retries = -1
[keystone_authtoken]
auth_uri = http://keystone-public:5000/v3
auth_url = http://keystone-admin:35357/v3
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 8rojnYV9r000nTH8TCY9CMghu2m2YtYiyw8XBx75
memcache_security_strategy = ENCRYPT
memcache_secret_key = Ku2zNgHuIOqhu1megUNhAqUBDQwyFs1N7HPyZh12
memcached_servers = memcached:11211
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = f68fec1b-1831-4fd8-99d1-5487c4609c5b
report_discard_supported = True
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper
--config-file /etc/cinder/cinder.conf
', config.json: "{\n \"config_files\": [\n {\n \"dest\":\
\ \"/etc/cinder/cinder.conf\", \n \"source\": \"/var/lib/kolla/config_files/cinder.conf\"\
, \n \"perm\": \"0600\", \n \"owner\": \"cinder\"\n \
\ }, \n {\n \"dest\": \"/etc/ceph/ceph.client.cinder.keyring\"\
, \n \"source\": \"/var/lib/kolla/config_files/ceph.client.cinder.keyring\"\
, \n \"owner\": \"cinder\", \n \"perm\": \"0700\"\n \
\ }, \n {\n \"dest\": \"/etc/ceph/ceph.conf\", \n \
\ \"source\": \"/var/lib/kolla/config_files/ceph.conf\", \n \"\
owner\": \"cinder\", \n \"perm\": \"0700\"\n }, \n {\n\
\ \"dest\": \"/etc/ceph/ceph.conf\", \n \"source\": \"/var/lib/kolla/config_files/ceph.conf\"\
, \n \"perm\": \"0600\", \n \"owner\": \"cinder\"\n \
\ }\n ], \n \"command\": \"cinder-volume --config-file /etc/cinder/cinder.conf\"\
\n}"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: cinder-volume, namespace: kolla}
configmap "cinder-volume" created
apiVersion: v1
data: {ceph.conf: '[global]
osd pool default size = 1
osd pool default min size = 1
log file = /var/log/kolla/ceph/$cluster-$name.log
log to syslog = true
err to syslog = true
fsid = a5d6c544-e078-4fbb-8baa-6364f0c267d0
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
setuser match path = /var/lib/ceph/$type/$cluster-$id
[mon]
mon compact on start = true
', config.json: "{ \"command\": \"/usr/bin/ceph-mon -d -i @MONID@ --public-addr\
\ @MONADDR@:6789\", \"config_files\": [\n {\n \"source\"\
: \"/var/lib/kolla/config_files/ceph.conf\",\n \"dest\": \"/etc/ceph/ceph.conf\"\
,\n \"owner\": \"ceph\",\n \"perm\": \"0600\"\n },\n\
\ {\n \"source\": \"/var/lib/kolla/config_files/ceph.client.admin.keyring\"\
,\n \"dest\": \"/etc/ceph/ceph.client.admin.keyring\",\n \
\ \"owner\": \"ceph\",\n \"perm\": \"0600\",\n \"optional\"\
: true\n },\n {\n \"source\": \"/var/lib/kolla/config_files/ceph.client.mon.keyring\"\
,\n \"dest\": \"/etc/ceph/ceph.client.mon.keyring\",\n \"\
owner\": \"ceph\",\n \"perm\": \"0600\",\n \"optional\"\
: true\n },\n {\n \"source\": \"/var/lib/kolla/config_files/ceph.client.radosgw.keyring\"\
,\n \"dest\": \"/etc/ceph/ceph.client.radosgw.keyring\",\n \
\ \"owner\": \"ceph\",\n \"perm\": \"0600\",\n \"optional\"\
: true\n },\n {\n \"source\": \"/var/lib/kolla/config_files/ceph.monmap\"\
,\n \"dest\": \"/etc/ceph/ceph.monmap\",\n \"owner\": \"\
ceph\",\n \"perm\": \"0600\",\n \"optional\": true\n \
\ }\n ]\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: ceph-mon, namespace: kolla}
configmap "ceph-mon" created
apiVersion: v1
data: {ceph.conf: '[global]
osd pool default size = 1
osd pool default min size = 1
log file = /var/log/kolla/ceph/$cluster-$name.log
log to syslog = true
err to syslog = true
fsid = a5d6c544-e078-4fbb-8baa-6364f0c267d0
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
setuser match path = /var/lib/ceph/$type/$cluster-$id
[mon]
mon compact on start = true
', config.json: "{ \"command\": \"/usr/bin/ceph-osd -f -d --public-addr @HOSTADDR@\
\ --cluster-addr @CLUSTERADDR@\", \"config_files\": [\n {\n \
\ \"source\": \"/var/lib/kolla/config_files/ceph.conf\",\n \"dest\"\
: \"/etc/ceph/ceph.conf\",\n \"owner\": \"ceph\",\n \"perm\"\
: \"0600\"\n },\n {\n \"source\": \"/var/lib/kolla/config_files/ceph.client.admin.keyring\"\
,\n \"dest\": \"/etc/ceph/ceph.client.admin.keyring\",\n \
\ \"owner\": \"ceph\",\n \"perm\": \"0600\"\n }\n ]\n}\n"}
kind: ConfigMap
metadata: {creationTimestamp: null, name: ceph-osd, namespace: kolla}
configmap "ceph-osd" created
(.venv)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment