Skip to content

Instantly share code, notes, and snippets.

@Silthus
Last active February 3, 2023 07:59
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Silthus/bf63c4cebd87970e93fd69b3ac47e833 to your computer and use it in GitHub Desktop.
Backstage Secured Production Dockerfile
# --- Stage 1: fetch a minimal Alpine miniroot filesystem -------------------
FROM alpine:latest as build

# Alpine release branch to pull the miniroot tarball from (e.g. v3.17, latest-stable).
ARG ALPINE_VERSION=latest-stable

# UTF-8 locale for all tooling in this stage (key=value form; the legacy
# space-separated ENV form is deprecated).
ENV LANG=C.UTF-8
ENV LC_CTYPE=C.UTF-8
ENV LC_ALL=C.UTF-8

RUN apk add --no-cache wget bash tar tzdata

# Resolve the newest miniroot release for the branch and download it.
# SECURITY: both requests use HTTPS with certificate verification enabled.
# The original used --no-check-certificate and a plain http:// URL, which let
# a man-in-the-middle substitute the entire base root filesystem.
RUN wget -q -O /tmp/alpine.tar.gz https://dl-cdn.alpinelinux.org/alpine/${ALPINE_VERSION}/releases/x86_64/$(wget -qO- https://dl-cdn.alpinelinux.org/alpine/${ALPINE_VERSION}/releases/x86_64/latest-releases.yaml | grep -o -m1 "\S*miniroot\S*tar.gz")
RUN mkdir -p /tmp/alpine/; tar xfz /tmp/alpine.tar.gz -C /tmp/alpine/

# Pre-seed the apk repositories of the new rootfs: the selected stable branch
# plus tagged edge repositories (installable via pkg@edge etc.). HTTPS here
# too, so later `apk` runs in derived images are not downgradeable.
RUN echo -e "https://dl-cdn.alpinelinux.org/alpine/${ALPINE_VERSION}/main\n\
https://dl-cdn.alpinelinux.org/alpine/${ALPINE_VERSION}/community\n\
@edge https://dl-cdn.alpinelinux.org/alpine/edge/main\n\
@edgecommunity https://dl-cdn.alpinelinux.org/alpine/edge/community\n\
@testing https://dl-cdn.alpinelinux.org/alpine/edge/testing" > /tmp/alpine/etc/apk/repositories

# Bake the Europe/Berlin timezone into the rootfs.
RUN cp /usr/share/zoneinfo/Europe/Berlin /tmp/alpine/etc/localtime && echo "Europe/Berlin" > /tmp/alpine/etc/timezone

# --- Stage 2: assemble the actual base image from the prepared rootfs ------
FROM scratch
USER root
COPY --from=build /tmp/alpine/ /
# --no-cache fetches a fresh package index without persisting it in a layer;
# a bare `apk upgrade` would have no index to work from in this new rootfs.
RUN apk upgrade --no-cache
CMD ["/bin/sh"]
# Minimal Node.js runtime image on top of the custom Alpine base.
ARG ALPINE_VERSION=latest
FROM base/alpine-base:${ALPINE_VERSION}
# --no-cache installs without persisting the apk index in the image layer
# (the original `--update` left index data behind in /var/cache/apk).
RUN apk add --no-cache nodejs
# Fixed numeric non-root UID so runtimes (e.g. Kubernetes runAsNonRoot) can verify it.
USER 1000
WORKDIR /app
CMD ["/bin/sh"]
# Node.js build/tooling image: node, npm, git, browsers and a JDK for
# CI-style builds and end-to-end tests.
ARG ALPINE_VERSION=latest
FROM base/alpine-base:${ALPINE_VERSION}
# Redeclare so the pre-FROM ARG is visible inside this stage.
ARG ALPINE_VERSION
# python2 only exists on the older branches; 3.16/3.17 get the set without it.
# NOTE(review): the default ALPINE_VERSION=latest takes the python2 branch —
# confirm that is intended for the branches this image is actually built from.
RUN if [ "$ALPINE_VERSION" = "3.16" ] || [ "$ALPINE_VERSION" = "3.17" ] ; \
then \
apk add --no-cache --update nodejs npm git chromium openjdk11 firefox ; \
else \
apk add --no-cache --update nodejs npm git chromium openjdk11 firefox python2 ; \
fi
# Dedicated system user with a stable UID for least-privilege builds.
RUN adduser -S --uid 6010 -h /home/node node
USER node
# Keep npm's global prefix and cache inside the writable home directory so
# global installs work without root.
RUN npm config set prefix /home/node/npm \
&& npm config set cache /home/node/npm_cache
ENV PATH="/home/node/npm/bin:${PATH}"
# This RUN already executes as `node` (USER above), so the directory is owned
# by node on creation; -p makes the step idempotent on rebuilds.
RUN mkdir -p /home/node/project
WORKDIR /home/node/project
CMD ["/bin/sh"]
#!/bin/bash
# Container entrypoint: resolve SECRET_* env pointers from Vault, then start
# the Backstage backend as PID 1.
set -euo pipefail

# Proxy vars would allow Backstage to escape the container and reach the internet.
if [[ -v HTTP_PROXY ]] || [[ -v HTTPS_PROXY ]]; then
  echo "HTTP/S_PROXY must not be set. Please unset it and try again."
  exit 1
fi

# Every env var named SECRET_* holds a "<vault_path>:<key>" pointer; replace
# its value with the secret fetched from Vault, exported under the same name.
# Process substitution (not a pipe) keeps the loop in the current shell, so
# the exports survive the loop and reach the node process below.
while read -r v; do
  env_key="$v"
  vault_path_and_key="${!v}"
  echo "Fetching secret for $env_key from $vault_path_and_key..."
  secret=$(/get_secrets_from_vault.sh "$vault_path_and_key")
  export "$env_key"="$secret"
done < <(compgen -A variable SECRET_)

echo "Starting the application..."
# exec replaces this shell so node becomes PID 1 and receives SIGTERM directly
# from `docker stop` (the original kept the shell as PID 1, which does not
# forward signals, forcing a 10s timeout + SIGKILL on every shutdown).
exec node packages/backend --config app-config.yaml --config app-config.production.yaml
# Stage 1 - Create yarn install skeleton layer: keep only the package.json of
# every workspace so the dependency-install layer in the next stage stays
# cached until a manifest actually changes.
FROM base/alpine-npm:alpine-3.17 AS packages
USER root
WORKDIR /app
COPY package.json yarn.lock ./
COPY packages packages
# Comment this out if you don't have any internal plugins
COPY plugins plugins
# -mindepth/-maxdepth are global options and must precede tests/actions;
# the original order (test first) triggers a GNU find warning.
RUN find packages -mindepth 2 -maxdepth 2 \! -name "package.json" -exec rm -rf {} \+
# Stage 2 - Install dependencies and build packages
FROM base/alpine-npm:alpine-3.17 AS build
USER root
# Cache apk downloads across builds via a BuildKit cache mount (host-side
# only, never stored in a layer). The original mounted Debian's apt paths
# (/var/cache/apt, /var/lib/apt), which apk never touches, so nothing was
# cached. apk only uses its cache when /etc/apk/cache exists, hence the
# symlink. The original also passed `--virtual --no-cache`, which made apk
# treat `--no-cache` as the virtual group name — a broken invocation.
RUN --mount=type=cache,target=/var/cache/apk,sharing=locked \
ln -sf /var/cache/apk /etc/apk/cache && \
apk update && \
apk add alpine-sdk python3 yarn && \
yarn config set python /usr/bin/python3
USER node
WORKDIR /app
COPY --from=packages --chown=node:node /app .
# Stop cypress from downloading its massive binary.
ENV CYPRESS_INSTALL_BINARY=0
# Share the yarn download cache across builds; uid/gid match the node user.
RUN --mount=type=cache,target=/home/node/.cache/yarn,sharing=locked,uid=1000,gid=1000 \
yarn install --frozen-lockfile --network-timeout 600000
COPY --chown=node:node . .
RUN yarn tsc
RUN yarn build:backend
# Unpack the backend skeleton and bundle for the later stages; -p keeps the
# step idempotent.
RUN mkdir -p packages/backend/dist/skeleton packages/backend/dist/bundle \
&& tar xzf packages/backend/dist/skeleton.tar.gz -C packages/backend/dist/skeleton \
&& tar xzf packages/backend/dist/bundle.tar.gz -C packages/backend/dist/bundle
# Stage 3 - Build the actual backend image and install production dependencies
FROM base/alpine-npm:alpine-3.17 as dependencies
USER root
# Same fixes as the build stage: the original cache mounts pointed at Debian
# apt paths that apk never uses, and `--virtual --no-cache` consumed
# `--no-cache` as the virtual group name. apk caches only when /etc/apk/cache
# exists, hence the symlink into the mount.
RUN --mount=type=cache,target=/var/cache/apk,sharing=locked \
ln -sf /var/cache/apk /etc/apk/cache && \
apk update && \
apk add alpine-sdk python3 py3-pip yarn jq && \
pip3 install --upgrade virtualenv --no-deps --force-reinstall && \
yarn config set python /usr/bin/python3
# From here on we use the least-privileged `node` user to run the backend.
USER node
# This should create the app dir as `node`.
# If it is instead created as `root` then the `tar` command below will fail: `can't create directory 'packages/': Permission denied`.
# If this occurs, then ensure BuildKit is enabled (`DOCKER_BUILDKIT=1`) so the app dir is correctly created as `node`.
WORKDIR /app
ENV VIRTUAL_ENV=/app/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# Putting the venv's bin first means pip3 below resolves inside the venv.
# uninstall pip to remove it from the virtual env (it has security findings and we don't need it)
RUN python3 -m venv $VIRTUAL_ENV && \
pip3 install mkdocs-techdocs-core==1.1.7 && \
pip3 uninstall -y pip
# Copy the install dependencies from the build stage and context
COPY --from=build --chown=node:node /app/yarn.lock /app/package.json /app/packages/backend/dist/skeleton/ ./
# Production-only install against the skeleton; cache shared with stage 2.
RUN --mount=type=cache,target=/home/node/.cache/yarn,sharing=locked,uid=1000,gid=1000 \
yarn install --frozen-lockfile --production --network-timeout 600000
# Stage 4 - Build the deployed image
FROM base/alpine-nodejs:alpine-3.17
USER root
# install python3 for executing mkdocs-techdocs-core
# install curl and jq for getting secrets from vault in entrypoint script
# install bash because both entrypoint scripts declare #!/bin/bash
# --no-cache keeps the apk index out of the final image layer.
RUN adduser -S --uid 6010 -h /home/node node && \
apk add --no-cache python3 curl jq bash
USER node
WORKDIR /app
ENV VIRTUAL_ENV=/app/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# Copy the dependencies and the virtual env from the production-build stage
COPY --from=dependencies --chown=node:node /app/.venv /app/.venv
COPY --from=dependencies --chown=node:node /app/node_modules /app/node_modules
# Copy the built packages from the build stage
COPY --from=build --chown=node:node /app/packages/backend/dist/bundle/ ./
# Copy any other files that we need at runtime
COPY --chown=node:node app-config.yaml app-config.production.yaml ./
COPY --chown=node:node --chmod=755 scripts/docker_entrypoint.sh /entrypoint.sh
COPY --chown=node:node --chmod=755 scripts/get_secrets_from_vault.sh /get_secrets_from_vault.sh
# This switches many Node.js dependencies to production mode.
# key=value form; the legacy space-separated ENV form is deprecated.
ENV NODE_ENV=production
ENTRYPOINT ["/entrypoint.sh"]
SECRET_POSTGRES_PASSWORD: "v1/azure/postgresql/dev/backstage/admin:backstage"
#!/bin/bash
# Fetch a single secret from Vault, authenticating with the pod's Kubernetes
# service-account JWT, and print the secret value to stdout.
#
# Prerequisites:
# kubernetes jwt under token_path
# usage: get_secrets_from_vault.sh <vault_path:secret_key>
set -euo pipefail

secret_path="$1"
# Split "<path>:<key>" into the Vault KV path and the key inside the secret.
secret_complete_path=$(echo "$secret_path" | cut -d':' -f1)
secret_name=$(echo "$secret_path" | cut -d':' -f2)
# No `export`: the JWT is only interpolated into the curl body below; keeping
# it out of the environment avoids leaking it to every child process.
KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
# NOTE(review): -k disables TLS certificate verification for the Vault calls.
# Keep it only if the Vault endpoint uses an internal CA that cannot be
# mounted into the container — otherwise drop -k (or add --cacert).
LOGIN_RESPONSE=$(curl -ks -X POST -d "{\"jwt\":\"${KUBE_TOKEN}\",\"role\":\"${VAULT_KUBERNETES_ROLE}\"}" -H 'Content-Type:application/json' "${VAULT_URL}/v1/auth/${VAULT_KUBERNETES_PATH}/login")
# Parse JSON with jq (already installed for this script) instead of the
# fragile grep + `cut -c 16-` slicing, which breaks on field reordering.
VAULT_TOKEN=$(echo "$LOGIN_RESPONSE" | jq -r '.auth.client_token')
SECRETS_RESPONSE=$(curl -ks -X GET -H "X-Vault-Token: ${VAULT_TOKEN}" "${VAULT_URL}/v1/system_kv/data/${secret_complete_path}")
# --arg passes the key as data, preventing jq filter injection via the key
# name; quoting the response prevents word splitting of the JSON.
SECRET=$(echo "$SECRETS_RESPONSE" | jq -r --arg k "$secret_name" '.data.data[$k]')
echo "${SECRET}"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment