Skip to content

Instantly share code, notes, and snippets.

@sgratzl
Last active December 29, 2017 14:55
Show Gist options
  • Save sgratzl/9e312ad86749968459605b99796826c7 to your computer and use it in GitHub Desktop.
Save sgratzl/9e312ad86749968459605b99796826c7 to your computer and use it in GitHub Desktop.
wrapdocker text eol=lf
# docker-compose file for a Jenkins master that can build Docker images
# itself (the host docker socket is bind-mounted into the container).
# NOTE: the pasted original had lost all indentation and was invalid YAML;
# structure restored here per the compose v2 schema.
version: "2.0"
services:
  jenkins:
    # Built from the Dockerfile next to this file.
    build: '.'
    volumes:
      # Persist Jenkins state in a named volume.
      - 'jenkins:/var/jenkins_home'
      # Share the host Docker daemon socket so jobs can run `docker ...`.
      - '/var/run/docker.sock:/var/run/docker.sock'
    ports:
      # Jenkins web UI: host 9090 -> container 8080.
      - '9090:8080'
volumes:
  jenkins: null
# Jenkins master image that can itself run Docker ("Docker-in-Docker" via the
# wrapdocker helper) and talk to AWS (awscli), with Xvfb/Firefox for headless
# browser tests.
FROM jenkins/jenkins:latest

# MAINTAINER is deprecated; LABEL is the supported way to record authorship.
LABEL maintainer="samuel.gratzl@datavisyn.io"

# Switch user to root so that we can install apps (jenkins image switches to user "jenkins" in the end)
USER root

# Install Docker prerequisites
RUN apt-get update -qq && apt-get install -qqy \
    xvfb iceweasel python-dev python-pip virtualenv \
    apt-transport-https \
    apparmor \
    ca-certificates \
    curl \
    lxc \
    iptables \
    && pip install awscli

# Install Docker from Docker Inc. repositories.
RUN curl -sSL https://get.docker.com/ | sh

# clean up apt caches to keep the image small
RUN apt-get -y --purge autoremove && rm -rf /var/lib/apt/lists/*

# Add jenkins user to the docker groups so that the jenkins user can run docker without sudo
RUN gpasswd -a jenkins docker

# Install the magic wrapper that bootstraps an inner Docker daemon
ADD ./wrapdocker /usr/local/bin/wrapdocker
RUN chmod +x /usr/local/bin/wrapdocker

VOLUME /var/lib/docker

# trigger to use Firefox instead of Chrome for testing with karma
ENV CONTINUOUS_INTEGRATION=Jenkins

# Stay root: wrapdocker needs to mount cgroups and start dockerd.
USER root
# Exec-form CMD so the wrapper receives signals directly instead of via sh -c.
CMD ["/usr/local/bin/wrapdocker", "/usr/local/bin/jenkins.sh"]
// Jenkinsfile: garbage-collect untagged images from every ECR repository in
// eu-central-1 to keep registry storage small.
// (The original also derived `family` from JOB_BASE_NAME here, but it was
// never used in this script, so it has been removed.)

// Common AWS CLI prefix: Frankfurt region.
def prefix = 'aws --region eu-central-1'
node {
  withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'PHOVEA_AWS_CREDENTIALS']]) {
    stage('cleaning up') {
      // For each repository (field 5 of the describe-repositories text output
      // is the repository name), list all UNTAGGED images and batch-delete
      // them by digest.
      sh "${prefix} ecr describe-repositories --output text | awk '{print \$5}' | while read line; do ${prefix} ecr list-images --repository-name \$line --filter tagStatus=UNTAGGED --query 'imageIds[*]' --output text | while read imageId; do ${prefix} ecr batch-delete-image --repository-name \$line --image-ids imageDigest=\$imageId; done; done"
    }
  }
}
// Jenkinsfile: stop the currently running ECS task of this job's family and
// start a fresh one from the latest registered task definition.
// Job names are expected to look like "update_<family>"; strip the prefix to
// get the ECS task-definition family.
def family = JOB_BASE_NAME.replace('update_','')
// Common AWS CLI prefix: plain-text output, Frankfurt region.
def prefix = 'aws --output text --region eu-central-1'
node {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'PHOVEA_AWS_CREDENTIALS']]) {
stage('stopping') {
// List running tasks of this family on the "caleydo" cluster (text output).
def out = sh script: "${prefix} ecs list-tasks --cluster caleydo --family ${family}", returnStdout: true
echo 'Running Tasks:'
echo out
if (out) {
// NOTE(review): assumes the text output is tab-separated with the task ARN
// in field index 1, and only the FIRST listed task is stopped — verify
// against `aws ecs list-tasks --output text` if the CLI version changes.
def taskId = out.split('\t')[1]
sh "${prefix} ecs stop-task --cluster caleydo --task ${taskId}"
} else {
echo "nothing to stop"
}
}
stage('starting') {
// Run a new task; --started-by tags it so the origin is visible in ECS.
echo 'Starting Task again:'
sh "${prefix} ecs run-task --cluster caleydo --task-definition ${family} --started-by jenkinsAutoUpdate"
}
}
}

Setup Jenkins from scratch

  1. initial plugin selection
  • github plugin
  • no ldap
  • no pam
  • no subversion
  • xUnit
  • no Ant
  • no Gradle
  2. manual installation of plugins:
  • Amazon ECR plugin
  • Xvfb plugin
  • nodejs plugin
  • Cloudbees Docker Build and Publish plugin
  • CloudBees Docker Custom Build Environment Plugin
  • Docker Pipeline
  3. configure global tools
  • node
    • name: node-v7
    • install automatically: node v7
  • xvfb
    • name: xvfb
  • docker
    • name: docker
    • install latest
  4. add credentials
  • global:
    • ID: PHOVEA_GITHUB_CREDENTIALS
    • Username: caleydo-bot
    • Password:
  • add domain: 922145058410.dkr.ecr.eu-central-1.amazonaws.com
    • type: AWS ECR credentials
    • id: PHOVEA_AWS_CREDENTIALS
    • Access Key ID:
    • Secret Access Key:

Setting Up a Product Job

#!/bin/bash
# wrapdocker — prepare this container (device-mapper nodes, cgroup mounts,
# inherited file descriptors, stale pidfile) so a Docker daemon can run
# *inside* it, then start dockerd and optionally exec a wrapped command.
#
# Environment:
#   PORT               if set, exec dockerd listening on tcp://0.0.0.0:$PORT
#                      (no wrapped command is run in that case)
#   LOG                "file" -> dockerd logs to /var/log/docker.log,
#                      anything else logs to the terminal (default: stdio)
#   DOCKER_DAEMON_ARGS extra arguments passed verbatim to dockerd
#
# Usage: wrapdocker [command [args...]]
#   Any arguments are exec'd once the inner daemon answers `docker info`.

# Ensure that all nodes in /dev/mapper correspond to mapped devices currently
# loaded by the device-mapper kernel driver
dmsetup mknodes

# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup
# BUGFIX: the original read `: {LOG:=stdio}` (missing `$`), which just passed
# a literal string to `:` and never applied the default. This form assigns
# LOG=stdio when LOG is unset or empty.
: "${LOG:=stdio}"

[ -d "$CGROUP" ] ||
  mkdir "$CGROUP"

mountpoint -q "$CGROUP" ||
  mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup "$CGROUP" || {
    echo "Could not make a tmpfs mount. Did you use --privileged?"
    exit 1
  }

if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
  mount -t securityfs none /sys/kernel/security || {
    echo "Could not mount /sys/kernel/security."
    echo "AppArmor detection and --privileged mode might break."
  }
fi

# Mount the cgroup hierarchies exactly as they are in the parent system.
# (Word-splitting of the cut output is intentional: one word per subsystem.)
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
  [ -d "$CGROUP/$SUBSYS" ] || mkdir "$CGROUP/$SUBSYS"
  mountpoint -q "$CGROUP/$SUBSYS" ||
    mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS"

  # The two following sections address a bug which manifests itself
  # by a cryptic "lxc-start: no ns_cgroup option specified" when
  # trying to start containers within a container.
  # The bug seems to appear when the cgroup hierarchies are not
  # mounted on the exact same directories in the host, and in the
  # container.

  # Named, control-less cgroups are mounted with "-o name=foo"
  # (and appear as such under /proc/<pid>/cgroup) but are usually
  # mounted on a directory named "foo" (without the "name=" prefix).
  # Systemd and OpenRC (and possibly others) both create such a
  # cgroup. To avoid the aforementioned bug, we symlink "foo" to
  # "name=foo". This shouldn't have any adverse effect.
  echo "$SUBSYS" | grep -q ^name= && {
    NAME=$(echo "$SUBSYS" | sed s/^name=//)
    ln -s "$SUBSYS" "$CGROUP/$NAME"
  }

  # Likewise, on at least one system, it has been reported that
  # systemd would mount the CPU and CPU accounting controllers
  # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
  # but on a directory called "cpu,cpuacct" (note the inversion
  # in the order of the groups). This tries to work around it.
  [ "$SUBSYS" = cpuacct,cpu ] && ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct"
done

# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
  echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
  echo "WARNING: it looks like the 'devices' cgroup is not mounted."

# Now, close extraneous file descriptors inherited from the runtime.
pushd /proc/self/fd >/dev/null
for FD in *
do
  case "$FD" in
  # Keep stdin/stdout/stderr
  [012])
    ;;
  # Nuke everything else
  *)
    eval exec "$FD>&-"
    ;;
  esac
done
popd >/dev/null

# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start. (-f suffices: it is a plain file,
# the original's -rf was unnecessary.)
rm -f /var/run/docker.pid

# If we were given a PORT environment variable, start as a simple daemon;
# otherwise, spawn a shell as well
if [ "$PORT" ]
then
  # DOCKER_DAEMON_ARGS stays unquoted on purpose: it may hold several args.
  exec dockerd -H "0.0.0.0:$PORT" -H unix:///var/run/docker.sock \
    $DOCKER_DAEMON_ARGS
else
  if [ "$LOG" == "file" ]
  then
    dockerd $DOCKER_DAEMON_ARGS &>/var/log/docker.log &
  else
    dockerd $DOCKER_DAEMON_ARGS &
  fi
  # Poll (up to 60s) until the inner daemon accepts API connections.
  (( timeout = 60 + SECONDS ))
  until docker info >/dev/null 2>&1
  do
    if (( SECONDS >= timeout )); then
      echo 'Timed out trying to connect to internal docker host.' >&2
      break
    fi
    sleep 1
  done
  # Hand over to the wrapped command, if one was given.
  [[ "$1" ]] && exec "$@"
fi
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment