@mjturek
Forked from BahaVv/setup.sh
Last active November 2, 2020 20:31
RDO-build-container-images
#!/bin/bash
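# Stand up and run a local copy of the TripleO/RDO container image build job
# (normally driven by Zuul) on a single host. Stages can be run individually
# or combined, e.g. (assuming the script is saved as setup.sh and run as root):
#   ./setup.sh --pre-run --run
#   ./setup.sh --clean-podman --reset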
WORKSPACE="/home/centos"
CICO_USER_DIR="/root"
OPENSTACK_GIT_URL="https://opendev.org/openstack"
OPENSTACK_SRC="opendev.org/openstack"
OPENSTACK_SRC_DIR="$WORKSPACE/src/$OPENSTACK_SRC"
OPENSTACK_CICO_DIR="$CICO_USER_DIR/src/$OPENSTACK_SRC"
ANSIBLE_HOSTS="$WORKSPACE/hosts"
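# NOTE: the src/opendev.org/openstack layout below mirrors the directory
# structure the upstream Zuul job provides, so the roles can resolve the
# zuul.projects src_dir paths that are mocked further down.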
echo "If there is no further output, no recognized argument was given."
if [ $# -lt 1 ]; then
    echo "No arguments detected. Expected arguments are..."
    echo "--reset | --clean-podman | --pre-run | --run"
    exit 1
fi
if [[ $EUID -ne 0 ]]; then
    echo "All functions of this script must be run as root. Exiting..."
    exit 1
fi
if [[ "$*" == *--reset* ]]; then
    echo "Resetting job run environment..."
    rm -rf /root/*
    rm $WORKSPACE/hosts
    rm $WORKSPACE/*.yaml
    rm -rf $WORKSPACE/src
    rm -rf $WORKSPACE/roles
    rm -rf /etc/yum.repos.d/delorean*
    echo "Finished reset!"
fi
if [[ "$*" == *--clean-podman* ]]; then
    echo "Removing all built containers!"
    podman rmi --force $(podman images -q)
    podman rm --force $(podman ps -a -q)
    echo "Finished cleaning up podman!"
fi
if [[ "$*" == *--pre-run* ]]; then
    echo "Executing pre-run procedures..."
    dnf install -y git podman
    dnf group install -y "Development Tools"
    # Set docker.io as the only registry, as using the default registry.centos.org
    # will result in downloading an x86 centos base container regardless of the
    # specified architecture.
    sed -i "s/.*registry.centos.org.*/registries = ['docker.io']/g" /etc/containers/registries.conf
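    # After this edit, the line that listed registry.centos.org should read:
    #   registries = ['docker.io']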
    mkdir -p $WORKSPACE/src/opendev.org/openstack-infra
    pushd $WORKSPACE/src/opendev.org/openstack-infra
    echo "Cloning down tripleo-ci repo..."
    git clone https://github.com/openstack/tripleo-ci.git
    echo "Adding skydive workaround due to lack of power support..."
    cat <<EOF >> tripleo-ci/roles/build-containers/tasks/venv_setup.yml
- name: Remove skydive from kolla
  file:
    path: "${CICO_USER_DIR}/workspace/venv_build/share/kolla/docker/skydive"
    state: absent
EOF
    popd
    echo "Finished pre-run tasks!"
fi
if [[ "$*" == *--run* ]]; then
    echo "Kicking off a build..."
    # Install deps
    sudo yum install -y git gcc libffi-devel openssl-devel python3-pip iptables podman
    sudo pip3 install ansible==2.9.13 psutil
    # Create folders to host source
    mkdir -p $OPENSTACK_SRC_DIR
    mkdir -p $OPENSTACK_CICO_DIR
    # Cat out a playbook to clone repos for parity with upstream
    cat <<EOF >> $WORKSPACE/clone_playbook.yaml
- hosts: all
  tasks:
    - name: 'Clone infra deps'
      git:
        repo: '$OPENSTACK_GIT_URL{{ item }}.git'
        dest: '$OPENSTACK_SRC_DIR{{ item }}'
      with_items:
        - '/ansible-role-bindep'
        - '/ansible-role-container-registry'
        - '/tripleo-ansible'
    - name: 'Clone zuul deps'
      git:
        repo: 'https://opendev.org/zuul/zuul-jobs/'
        dest: '$WORKSPACE/src/zuul/zuul-jobs'
    - name: 'Clone playbook deps'
      git:
        repo: '$OPENSTACK_GIT_URL/{{ item }}.git'
        dest: '$OPENSTACK_CICO_DIR/{{ item }}'
      with_items:
        - 'ansible-role-container-registry'
        - 'tripleo-repos'
        - 'kolla'
        - 'python-tripleoclient'
        - 'tripleo-ansible'
        - 'tripleo-common'
        - 'requirements'
EOF
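    # Run the clone playbook against localhost; the trailing comma after 127.0.0.1
    # makes ansible treat it as an inline host list rather than an inventory file path.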
    /usr/local/bin/ansible-playbook -vvvv --connection=local --inventory 127.0.0.1, $WORKSPACE/clone_playbook.yaml
    # Get the proper roles for the playbooks to execute
    # Copy roles from tripleo-ci
    mkdir -p ${WORKSPACE}/roles
    cp -r $OPENSTACK_SRC_DIR-infra/tripleo-ci/roles/* ${WORKSPACE}/roles
    cp -r $OPENSTACK_SRC_DIR/tripleo-ansible/roles/* ${WORKSPACE}/roles
    cp -r $OPENSTACK_SRC_DIR/ansible-role-bindep ${WORKSPACE}/roles/bindep
    cp -r $OPENSTACK_SRC_DIR/ansible-role-container-registry ${WORKSPACE}/roles/ansible-role-container-registry
    cp -r $WORKSPACE/src/zuul/zuul-jobs/roles/ensure-pip ${WORKSPACE}/roles
    # Mock some zuul vars so the playbooks can run on jenkins
    cat <<EOF >> ${WORKSPACE}/roles/build-containers/vars/main.yaml
zuul:
  projects:
    $OPENSTACK_SRC/tripleo-repos:
      src_dir: "src/$OPENSTACK_SRC/tripleo-repos"
    $OPENSTACK_SRC/python-tripleoclient:
      src_dir: "src/$OPENSTACK_SRC/python-tripleoclient"
  branch: "master"
  pipeline: "periodic"
use_buildah: false
use_kolla: true
EOF
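    # The tripleo-repos role reads the same mocked zuul facts from its own vars file.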
    cat <<EOF >> ${WORKSPACE}/roles/tripleo-repos/vars/main.yml
zuul:
  branch: master
  projects:
    $OPENSTACK_SRC/tripleo-repos:
      src_dir: "src/$OPENSTACK_SRC/tripleo-repos"
    $OPENSTACK_SRC/python-tripleoclient:
      src_dir: "src/$OPENSTACK_SRC/python-tripleoclient"
EOF
    # Create iptables chain because the playbooks expect it to exist
    sudo iptables -N openstack-INPUT
    # Create a hosts file. The playbooks must be fed an inventory file, and
    # will complain about running on localhost, so we arbitrarily use 8.8.8.8
    cat << EOF > ${ANSIBLE_HOSTS}
8.8.8.8
[openstack_nodes]
localhost ansible_connection=local
EOF
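    # The playbooks below target the [openstack_nodes] group, which resolves to
    # localhost in the inventory created above; 8.8.8.8 is only a placeholder entry.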
    # Create local versions of pre.yaml and run.yaml
    cat <<EOF >> $WORKSPACE/new_buildcontainers.yaml
- hosts: openstack_nodes
  name: TripleO Setup Container Registry and repos mirror
  roles:
    - role: tripleo-repos
      override_repos: "{{ buildcontainers_override_repos | default('') }}"
      tripleo_repos_repository: "${CICO_USER_DIR}/{{ zuul.projects['opendev.org/openstack/tripleo-repos'].src_dir }}"
    - role: bindep
      bindep_dir: "${CICO_USER_DIR}/{{ zuul.projects['opendev.org/openstack/python-tripleoclient'].src_dir }}"
  tasks:
    - name: Run build containers pre tasks
      include_role:
        name: build-containers
        tasks_from: pre
- hosts: openstack_nodes
  name: TripleO container image building job
  tasks:
    - name: Run build containers tasks
      include_role:
        name: build-containers
EOF
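    # This playbook roughly mirrors the upstream job's pre.yaml (repo setup, bindep,
    # build-containers pre tasks) followed by run.yaml (the image build itself).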
    # Run the playbooks locally
    /usr/local/bin/ansible-playbook -vvvv -i ${ANSIBLE_HOSTS} $WORKSPACE/new_buildcontainers.yaml
    echo "Build run finished!"
fi
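# Once a build completes, the resulting images can be listed with 'podman images'.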