@zimbatm, created June 8, 2022 09:35
data "aws_vpc" "default" {
default = true
}
data "aws_subnet_ids" "default" {
vpc_id = data.aws_vpc.default.id
}
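
# Read the outputs of the main staging stack (RDS, ECS cluster, load
# balancer, Redis); they are exposed as local.remote_state.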
data "terraform_remote_state" "main" {
backend = "s3"
config = {
bucket = "modeldrive-staging-terraform-state"
dynamodb_table = "modeldrive-staging-terraform-state"
key = "main"
region = local.aws_region
encrypt = true
profile = "modeldrive-staging"
}
}
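
# The postgresql provider talks to the shared staging database with the
# admin credentials from the remote state, so that each PR can get its own
# role and database.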
provider "postgresql" {
host = local.remote_state.rds_hostname
port = 5432
username = local.remote_state.rds_username
password = local.remote_state.rds_password
connect_timeout = 15
sslmode = "require"
superuser = false
}
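
# Per-PR login role. Granting "rds_iam" lets the role authenticate with
# AWS IAM database authentication instead of a password.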
resource "postgresql_role" "database_user" {
name = local.name
login = true
roles = ["rds_iam"]
}
resource "postgresql_database" "database" {
name = local.name
# Make sure to destroy the database before the role
depends_on = [postgresql_role.database_user]
owner = local.remote_state.rds_username
}
#!/usr/bin/env bash
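# Tear down a PR environment: terraform destroy, remove its remote state,
# and delete its DB snapshots.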
set -euo pipefail
cd "$(dirname "$0")"
pull_request=$1
# Where the per-PR state is being stored.
state_bucket=s3://modeldrive-staging-terraform-state/pull_request
# Destroy the environment
if ! github-deploy cleanup --pull-request "$pull_request" -- ./env.sh "$pull_request" destroy -auto-approve; then
  # Try a second time if the first one didn't work. This can happen because
  # some resources depend on each other.
  github-deploy cleanup --pull-request "$pull_request" -- ./env.sh "$pull_request" destroy -auto-approve
fi
# And remove the associated Terraform state from the bucket
aws s3 rm --profile modeldrive-staging "$state_bucket/$pull_request"
# Remove all DB snapshots for the PR
../../scripts/remove-db-snapshots.sh modeldrive-staging 0 "pr-${pull_request}"
#!/usr/bin/env bash
# Destroys all the staging environments
set -euo pipefail
cd "$(dirname "$0")"
if [[ -n ${PR_NUMBER:-} ]]; then
  prs=$PR_NUMBER
else
  prs=$(./list.sh)
fi
# For each PR that has an associated Terraform state
for pull_request in $prs; do
  echo --------------------------------------------------------------
  echo "Cleaning PR $pull_request"
  echo --------------------------------------------------------------
  # Terraform-destroy the old environment
  ./down.sh "$pull_request" || true
done
# Cleanup old database users
echo --------------------------------------------------------------
echo "Cleaning old DB users"
echo --------------------------------------------------------------
../../targets/aws_staging/pg_users_cleanup.sh
echo --------------------------------------------------------------
echo SUCCESS
#!/usr/bin/env bash
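# Run terraform against a PR's own state file.
# Usage: ./env.sh <pr-number> <terraform arguments...>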
set -euo pipefail
pull_request=$1
shift 1
cd "$(dirname "$0")"
rm -rf .terraform
export TF_VAR_pull_request=$pull_request
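# compute-metadata.sh exports VITE_METADATA_JSON, which the Terraform code
# reads back via metadata.json.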
source ../../../ops/scripts/compute-metadata.sh
echo "$VITE_METADATA_JSON" >metadata.json
terraform init -backend-config="key=pull_request/$pull_request"
terraform "$@"
#!/usr/bin/env bash
# List the deployed PRs
set -euo pipefail
state_bucket=s3://modeldrive-staging-terraform-state/pull_request/
PRs=$(aws s3 ls --profile modeldrive-staging "$state_bucket")
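# Column 4 of `aws s3 ls` is the object key, which is the PR number.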
echo "$PRs" | awk '{ print $4 }'
terraform {
  backend "s3" {
    bucket  = "modeldrive-staging-terraform-state"
    region  = "eu-west-2"
    encrypt = true
    profile = "modeldrive-staging"
    # Disable locking. It's not super safe, but PR deployments are not
    # critical, and it avoids issues when re-deploying a deleted state.
    # dynamodb_table = "modeldrive-staging-terraform-state"
    # Set during terraform init
    # key = "something"
  }
  required_providers {
    postgresql = {
      source = "cyrilgdn/postgresql"
    }
  }
}
provider "aws" {
region = local.aws_region
profile = local.aws_profile
}
locals {
  name        = "pr-${var.pull_request}"
  aws_region  = "eu-west-2"
  aws_profile = "modeldrive-staging"
  tags = {
    target = "${basename(abspath(path.module))}-${local.name}"
  }
  dns_zone     = "staging.modeldrive.com"
  domain_name  = "${local.name}.${local.dns_zone}"
  deploy_env   = "review"
  remote_state = data.terraform_remote_state.main.outputs
}
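
# Resolve the container image references for this PR's frontend and backend.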
module "modeldrive_image_frontend" {
source = "../../images"
image_name = "frontend"
}
module "modeldrive_image_backend" {
source = "../../images"
image_name = "backend"
}
module "modeldrive" {
source = "../../modules/aws_modeldrive"
aws_ses_region_endpoint = "email-smtp.eu-west-2.amazonaws.com"
aws_region = local.aws_region
ecs_cluster_arn = local.remote_state.ecs_cluster_arn
lb_sg_id = local.remote_state.lb_sg_id
lb_listener_arn = local.remote_state.lb_listener_arn
domain_name = local.domain_name
name = local.name
tags = local.tags
vpc_id = data.aws_vpc.default.id
vpc_subnets = data.aws_subnet_ids.default.ids
image_frontend = module.modeldrive_image_frontend.image
image_backend = module.modeldrive_image_backend.image
# rds_cluster_id = local.remote_state.rds_cluster_id
rds_database = local.name
rds_hostname = local.remote_state.rds_hostname
rds_username = local.name
redis_url = local.remote_state.redis_url
ecs_cluster_name = local.remote_state.ecs_cluster_name
elb_arn = local.remote_state.lb_elb_arn
run_backend_migration = var.run_backend_migration
deploy_env = local.deploy_env
target_env = local.aws_profile
dns_zone = local.dns_zone
# All staging environments get provisioned with a modeldrive/drive!
# superuser.
django_superuser_password = "drive!"
# Add a zero to the priorities so PRs won't clash.
rule_priority_base = "${var.pull_request}0"
metadata = jsondecode(file("${path.module}/metadata.json"))
# Make sure to create those resources before deploying the app
depends_on = [
postgresql_role.database_user,
postgresql_database.database,
]
}
output "url" {
value = "https://${local.domain_name}/"
}
#!/usr/bin/env bash
set -euo pipefail
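# Open an interactive shell inside one of the PR's running ECS tasks.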
: "${1?"Missing pull request"}"
: "${2?"Missing task name"}"
cd "$(dirname "$0")"
pull_request=$1
task=$2
if [[ $task != "frontend" && $task != "backend" ]]; then
# shellcheck disable=SC2016
echo 'Task must be `frontend` or `backend`'
exit 1
fi
export AWS_PROFILE=modeldrive-staging
../../scripts/ecs-shell.sh "modeldrive-staging" "pr-${pull_request}-$task"
#!/usr/bin/env bash
set -euo pipefail
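# Snapshot the staging DB cluster for a PR and prune its older snapshots.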
cd "$(dirname "$0")"
pull_request=$1
SNAPSHOT_NAME="PR-${pull_request}-$(date +%Y-%m-%d-%H-%M)"
export SNAPSHOT_NAME
export AWS_PROFILE=modeldrive-staging
export DB_CLUSTER=modeldrive-staging
../../scripts/create-db-snapshot.sh "$SNAPSHOT_NAME"
../../scripts/remove-db-snapshots.sh "$DB_CLUSTER" 1 "pr-${pull_request}"
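# NB: the ::set-output workflow command has since been deprecated by GitHub
# Actions in favor of writing to "$GITHUB_OUTPUT".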
echo "::set-output name=snapshot_name::${SNAPSHOT_NAME}"
#!/usr/bin/env bash
set -euo pipefail
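# Deploy a PR environment with terraform apply, recording the deployment
# via github-deploy when running in GitHub Actions.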
cd "$(dirname "$0")"
pull_request=$1
export TF_VAR_run_backend_migration="${2:-true}"
if [[ -v GITHUB_RUN_ID ]]; then
  BUILD_URL="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}"
  github-deploy --git-ref-commit please --environment-url "https://pr-$pull_request.staging.modeldrive.com" --pr "$pull_request" --build-url "$BUILD_URL" -- ./env.sh "$pull_request" apply -auto-approve
else
  ./env.sh "$pull_request" apply -auto-approve
fi
URL=$(terraform output -json | jq -r '.url.value')
# Used by GitHub actions to set the environment URL
echo "::set-output name=url::${URL}"
# Passed by the ./env.sh script
variable "pull_request" {
type = number
}
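
# Set by the deploy script via TF_VAR_run_backend_migration.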
variable "run_backend_migration" {
type = bool
default = false
}