Skip to content

Instantly share code, notes, and snippets.

@3sky
Created September 27, 2022 21:07
Show Gist options
  • Save 3sky/4959ec2e25729f3455e816c122b96c25 to your computer and use it in GitHub Desktop.
Save 3sky/4959ec2e25729f3455e816c122b96c25 to your computer and use it in GitHub Desktop.
Just a random Bitbucket Cloud pipeline configuration (bitbucket-pipelines.yml).
# Shared step definitions, referenced from the `pipelines:` section via YAML
# anchors (&name) and aliases/merge keys (*name / <<: *name).
definitions:
  services:
    docker:
      memory: 3072  # increase memory for elasticsearch and test container
  steps:
    # Build the backend, nginx and web images and push them to ECR.
    - step: &build-k8s-images
        name: Build images
        image: atlassian/pipelines-awscli
        caches:
          - docker
        services:
          - docker
        oidc: true
        script:
          # Exchange the Bitbucket OIDC token for AWS credentials.
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - DOCKERFILES_DIR=${BITBUCKET_CLONE_DIR}/infrastructure/dockerfiles
          - source ${DOCKERFILES_DIR}/images_uri.sh
          # NOTE(review): '|| true' added — describe-images exits non-zero when
          # the tag is missing, which would otherwise fail the whole step; the
          # missing-image case is exactly when we want to fall through and build.
          - aws ecr describe-images --repository-name=${ECR_REPOSITORY} --image-ids=imageTag=${K8S_yyyyy_IMAGE_TAG} && yyyyy_EXISTS="True" || true
          - aws ecr get-login-password | docker login --username AWS --password-stdin "${ECR_AWS_ACCOUNT}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
          - |
            if [[ -z "$yyyyy_EXISTS" ]]; then
              docker build yyyyy -t ${K8S_yyyyy_IMAGE_URI} -f ${DOCKERFILES_DIR}/yyyyy/Dockerfile.prod --build-arg MAXMIND_LICENSE_KEY
              docker push ${K8S_yyyyy_IMAGE_URI}
            fi
          # We need to always build the nginx image, as this includes frontend files which may be changed independently of the backend code.
          - S3_PREFIX="s3://xxxx-internal/front/"
          - mkdir -p front/dist/zzzz-front front/dist/xxxx-front-v2
          - FRONT_FILENAME="xxxx-front-$xxxx_FRONT_BRANCH.zip"
          - ${DOCKERFILES_DIR}/fetch_front.sh front "$S3_PREFIX" "$FRONT_FILENAME"
          - WEBCHAT_FILENAME="xxxx-chat-front-master.zip"
          - ${DOCKERFILES_DIR}/fetch_front.sh webchat-front "$S3_PREFIX" "$WEBCHAT_FILENAME"
          - NEW_FRONT_FILENAME="xxxx-front-angular-$NEW_FRONT_BRANCH.zip"
          - ${DOCKERFILES_DIR}/fetch_front.sh front "$S3_PREFIX" "$NEW_FRONT_FILENAME"
          - K8S_WEB_IMAGE_TAG=k8s-web-${yyyyyHash}-${BITBUCKET_BUILD_NUMBER}
          - K8S_WEB_IMAGE_URI=${ECR_AWS_ACCOUNT}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${ECR_REPOSITORY}:${K8S_WEB_IMAGE_TAG}
          - K8S_NGINX_IMAGE_TAG=k8s-nginx-${yyyyyHash}-${BITBUCKET_BUILD_NUMBER}
          - K8S_NGINX_IMAGE_URI=${ECR_AWS_ACCOUNT}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${ECR_REPOSITORY}:${K8S_NGINX_IMAGE_TAG}
          # Collect Django static files into the clone dir for the nginx image.
          - docker run -v "${BITBUCKET_CLONE_DIR}":/app "${K8S_yyyyy_IMAGE_URI}" python /app/yyyyy/manage.py collectstatic --no-input
          - find front/ webchat-front/ -type f ! -name '*.html' -delete
          - mv www/ ${DOCKERFILES_DIR}/nginx
          - mv front/ webchat-front/ ${DOCKERFILES_DIR}/web
          - cd ${DOCKERFILES_DIR}/nginx
          - docker build . -t ${K8S_NGINX_IMAGE_URI}
          - docker push ${K8S_NGINX_IMAGE_URI}
          - cd ${DOCKERFILES_DIR}/web
          - docker build . -t ${K8S_WEB_IMAGE_URI} --build-arg "BUILD_IMAGE=${K8S_yyyyy_IMAGE_URI}"
          - docker push ${K8S_WEB_IMAGE_URI}

    # Uninstall the helm releases for the namespace given in K8S_STAGE_NAME.
    - step: &undeploy-k8s-helm
        image: atlassian/pipelines-awscli
        oidc: true
        script:
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - apk add curl
          - |
            curl -sLO https://storage.googleapis.com/kubernetes-release/release/v1.21.7/bin/linux/amd64/kubectl && \
            mv kubectl /usr/bin/kubectl && \
            chmod +x /usr/bin/kubectl
          - |
            curl -sL "https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz" | tar -xvz && \
            mv linux-amd64/helm /usr/bin/helm && \
            chmod +x /usr/bin/helm && \
            rm -rf linux-amd64
          - aws eks --region us-west-2 update-kubeconfig --name yyyyy --alias cluster-config
          - kubectl config use-context cluster-config
          - kubectl version
          - cd ${BITBUCKET_CLONE_DIR}/infrastructure/helm
          - helm uninstall --namespace="${K8S_STAGE_NAME}" celery nginx web config-chart

    # terraform init + validate for the resource named in AWS_RESOURCE.
    - step: &terraform-validate
        image: hashicorp/terraform
        name: Validate AWS resource
        oidc: true
        script:
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - cd infrastructure/terraform/${AWS_RESOURCE}
          - terraform init
          - terraform validate

    # terraform plan; the plan file is passed to the apply step as an artifact.
    - step: &terraform-plan
        image: hashicorp/terraform
        name: Plan AWS resource
        oidc: true
        script:
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - cd infrastructure/terraform/${AWS_RESOURCE}
          - terraform init
          - terraform plan -out=${AWS_RESOURCE}_plan.tfplan
        artifacts:
          - infrastructure/terraform/*/*_plan.tfplan

    # Manually-triggered apply of the previously produced plan file.
    - step: &terraform-infrastructure-apply
        image: hashicorp/terraform
        name: Apply AWS resource
        oidc: true
        trigger: manual
        script:
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - |
            if [[ "${AWS_RESOURCE}" == "ejabberd" ]]; then
              apk add --no-cache ansible
            fi
          - cd infrastructure/terraform/${AWS_RESOURCE}
          - ls -la
          - terraform init
          - terraform apply -input=false ${AWS_RESOURCE}_plan.tfplan
        artifacts:
          - infrastructure/terraform/*/*_plan.tfplan

    # Deploy the application into the EKS cluster via terraform (helm-backed).
    - step: &terraform-deploy-application
        image: atlassian/pipelines-awscli
        name: Deploy yyyyy into k8s
        oidc: true
        script:
          # Fail fast with a clear message when the target namespace is unset.
          - K8S_NAMESPACE=${K8S_NAMESPACE:?'K8S_NAMESPACE missing! K8S_NAMESPACE is required for deployment!'}
          - export AWS_ROLE_ARN=${AWS_OIDC_ROLE_ARN}
          - export AWS_WEB_IDENTITY_TOKEN_FILE=$PWD/web-identity-token
          - echo "$BITBUCKET_STEP_OIDC_TOKEN" > "$PWD/web-identity-token"
          - export AWS_RESOURCE="yyyyy"
          - export TERRAFORM_TARGET_DIR="${BITBUCKET_CLONE_DIR}/infrastructure/terraform/${AWS_RESOURCE}"
          - DOCKERFILES_DIR="${BITBUCKET_CLONE_DIR}/infrastructure/dockerfiles"
          - source "${DOCKERFILES_DIR}/images_uri.sh"
          # Install kubectl, helm (+ helm-secrets/sops) and terraform — the
          # awscli image ships none of them.
          - apk add curl
          - |
            curl -sLO https://storage.googleapis.com/kubernetes-release/release/v1.21.7/bin/linux/amd64/kubectl && \
            mv kubectl /usr/bin/kubectl && \
            chmod +x /usr/bin/kubectl
          - |
            curl -sL "https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz" | tar -xvz && \
            mv linux-amd64/helm /usr/bin/helm && \
            chmod +x /usr/bin/helm && \
            rm -rf linux-amd64
          - |
            curl -sLO https://github.com/mozilla/sops/releases/download/v3.7.2/sops-v3.7.2.linux.amd64 && \
            mv sops-v3.7.2.linux.amd64 /usr/bin/sops && \
            chmod +x /usr/bin/sops
          - helm plugin install https://github.com/jkroepke/helm-secrets --version v3.13.0
          - |
            curl https://releases.hashicorp.com/terraform/1.1.2/terraform_1.1.2_linux_amd64.zip -o terraform.zip && \
            unzip terraform.zip -d /usr/bin && \
            chmod +x /usr/bin/terraform && \
            rm -f terraform.zip
          - aws eks --region us-west-2 update-kubeconfig --name yyyyy --alias cluster-config
          - kubectl config use-context cluster-config
          - kubectl version
          - cd "${TERRAFORM_TARGET_DIR}"
          - terraform init
          # One terraform workspace per k8s namespace.
          - terraform workspace select "$K8S_NAMESPACE"
          - terraform validate
          - terraform plan -var="build_tag_hash=$yyyyyHash" -var="build_number=${BITBUCKET_BUILD_NUMBER}" -out=${AWS_RESOURCE}_plan.tfplan
          - terraform apply -input=false ${AWS_RESOURCE}_plan.tfplan
        artifacts:
          - infrastructure/terraform/*/*_plan.tfplan

    # Build and push the pubsub image unless it already exists in ECR.
    - step: &build-pubsub
        name: Build pubsub
        image: atlassian/pipelines-awscli
        services:
          - docker
        oidc: true
        script:
          - source devops/pipeline-utils.sh
          # NOTE(review): '|| true' added — a missing tag must not fail the
          # step; it means we need to build, which is what the rest does.
          - aws ecr describe-images --repository-name=${ECR_REPOSITORY} --image-ids=imageTag=${PUBSUB_TAG} && exit 0 || true # if image exists, no need to rebuild
          - registry_login
          - docker build devops/pubsub -t ${PUBSUB_IMAGE}
          - docker push ${PUBSUB_IMAGE}

    # Build the backend image, run the test suite, push on success.
    - step: &build
        name: Docker build, test & push
        image: atlassian/pipelines-awscli
        caches:
          - docker
        services:
          - docker
        artifacts:
          - yyyyy/.coverage
        oidc: true
        script:
          - source devops/pipeline-utils.sh
          # NOTE(review): '|| true' added — same reasoning as in build-pubsub.
          - aws ecr describe-images --repository-name=${ECR_REPOSITORY} --image-ids=imageTag=${IMAGE_TAG} && exit 0 || true # if image exists, no need to rebuild
          - registry_login
          - prepare_test_environment
          - run_tests
          - get_coverage
          - test_cleanup
          - docker build yyyyy -t ${IMAGE_URI} -f yyyyy/Dockerfile.production --build-arg MAXMIND_LICENSE_KEY
          - docker push ${IMAGE_URI}

    # Apply the kustomize overlay for the staging cluster.
    - step: &deploy-k8s
        name: Deploy K8S
        image: atlassian/pipelines-awscli
        oidc: true
        script:
          # replace by env variables in bitbucket pipeline settings
          - export CLUSTER_NAME=staging
          - source devops/pipeline-utils.sh
          - install_kustomize
          - get_k8s_credentials $CLUSTER_NAME
          - cd devops/k8s/overlays/$CLUSTER_NAME
          - kustomize edit set image PUBSUB_IMAGE=$PUBSUB_IMAGE
          - kustomize build . | kubectl apply -f -

    # Deploy to Elastic Beanstalk and register a Sentry release.
    - step: &deploy
        name: Deploy
        image: atlassian/pipelines-awscli
        oidc: true
        script:
          - source devops/pipeline-utils.sh
          - registry_login
          - apk add zip gettext
          - envsubst < docker-compose.prod.yml > docker-compose.yml
          - ./build-app.sh application.zip
          - pipe: atlassian/aws-elasticbeanstalk-deploy:1.0.2
            variables:
              AWS_OIDC_ROLE_ARN: $AWS_OIDC_ROLE_ARN
              APPLICATION_NAME: $APPLICATION_NAME
              ENVIRONMENT_NAME: $ENVIRONMENT_NAME
              ZIP_FILE: application.zip
              S3_BUCKET: $S3_BUCKET
              VERSION_LABEL: ${ENVIRONMENT_NAME}.${BITBUCKET_COMMIT:0:8}.$(date -u +%Y-%m-%d_%H%M%S)
              WAIT: "true"
          - pipe: sentryio/sentry-new-release:0.3.0
            variables:
              SENTRY_AUTH_TOKEN: $SENTRY_AUTH_TOKEN
              SENTRY_ORG: 'xxxx'
              SENTRY_PROJECT: 'yyyyy'
              ENVIRONMENT: $ENVIRONMENT_NAME
              FINALIZE: $SENTRY_FINALIZE_RELEASE

    # Build the playable-ad lambda bundle and upload it to S3 (manual).
    - step: &deploy-playable-ad-lambda
        name: Deploy playable-ad lambda
        image: node:12
        oidc: true
        trigger: manual
        script:
          - apt-get update -y
          - apt-get install -y zip
          - cd yyyyy/docker/playable-ad-lambda/
          - ./build.sh
          - pipe: atlassian/aws-s3-deploy:1.1.0
            variables:
              AWS_OIDC_ROLE_ARN: $AWS_OIDC_ROLE_ARN
              S3_BUCKET: 'xxxx-internal/lambda'
              LOCAL_PATH: 'assets'

    # Package the upstream browser-screenshot lambda and upload it to S3 (manual).
    - step: &deploy-browser-screenshot-lambda
        name: Deploy browser screenshot lambda
        image: node:12
        oidc: true
        trigger: manual
        script:
          - apt-get update -y
          - apt-get install -y zip
          - wget --output-document lambda-browser-screenshots-master.zip https://github.com/beneboy/lambda-browser-screenshots/archive/refs/heads/master.zip
          - unzip lambda-browser-screenshots-master.zip
          - cd lambda-browser-screenshots-master/src
          - npm install
          - mkdir -p ../assets
          - zip -qq ../assets/browser-screenshot-lambda.zip -r -9 .
          - pipe: atlassian/aws-s3-deploy:1.1.0
            variables:
              AWS_OIDC_ROLE_ARN: $AWS_OIDC_ROLE_ARN
              S3_BUCKET: 'xxxx-internal/lambda'
              LOCAL_PATH: '../assets'
# Pipeline wiring: custom (manually-run) pipelines, per-branch pipelines, and
# the default pull-request pipeline. Steps are reused from `definitions.steps`
# via aliases; `<<: *anchor` merges a step so `deployment:` can be overridden.
pipelines:
  custom:
    undeploy-k8s-staging1:
      - variables:
          - name: K8S_STAGE_NAME
            default: staging-1-internal
      - step:
          <<: *undeploy-k8s-helm
    undeploy-k8s-staging2:
      - variables:
          - name: K8S_STAGE_NAME
            default: staging-2-internal
      - step:
          <<: *undeploy-k8s-helm
    undeploy-k8s-preprod:
      - variables:
          - name: K8S_STAGE_NAME
            default: production-zzzz
      - step:
          <<: *undeploy-k8s-helm
          # production deployment environment: requires approval / is tracked.
          deployment: production
    deploy-terraform-AWS-resource:
      - variables:
          # No default — the runner must name the terraform resource directory.
          - name: AWS_RESOURCE
      - step: *terraform-validate
      - step: *terraform-plan
      - step: *terraform-infrastructure-apply
    deploy-terraform-yyyyy:
      - step: *build-k8s-images
      - step: *terraform-deploy-application
  branches:
    master:
      - step: *build
      - parallel:
          - step:
              <<: *deploy
              deployment: staging1
          - step:
              <<: *deploy
              deployment: staging1-new-fe
      - step: *deploy-playable-ad-lambda
      - step: *deploy-browser-screenshot-lambda
    develop:
      - parallel:
          - step: *build
          - step:
              <<: *build-k8s-images
              deployment: staging2-k8s-fe
      - parallel:
          - step:
              <<: *terraform-deploy-application
              deployment: staging2-k8s
          - step:
              <<: *deploy
              deployment: staging2
          - step:
              <<: *deploy
              deployment: staging2-new-fe
      - step: *deploy-playable-ad-lambda
      - step: *deploy-browser-screenshot-lambda
    stable:
      - step: *build
      - step:
          <<: *deploy
          deployment: production
    playtika:
      - step: *build
      - step:
          <<: *deploy
          deployment: playtika
  pull-requests:
    '**': # this runs as default for any branch not elsewhere defined
      - parallel:
          - step:
              <<: *build-pubsub
              # Only rebuild pubsub when its sources changed.
              condition:
                changesets:
                  includePaths:
                    - "devops/pubsub/**"
          - step:
              <<: *build
              # Only build/test when pipeline config or backend code changed.
              condition:
                changesets:
                  includePaths:
                    - "bitbucket-pipelines.yml"
                    - "devops/pipeline-utils.sh"
                    - "**.py"
                    - "yyyyy/requirements/**"
          # - step:
          #     <<: *deploy-k8s
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment