Aurora + Terraform + Ansible
data "aws_rds_engine_version" "postgresql" {
engine = "aurora-postgresql"
filter {
name = "engine-mode"
values = ["serverless"]
}
}
module "aurora_postgresql_v2" {
source = "terraform-aws-modules/rds-aurora/aws"
version = "~>v8.3.1"
name = "${local.project}-postgresqlv2"
engine = data.aws_rds_engine_version.postgresql.engine
engine_mode = "provisioned"
engine_version = data.aws_rds_engine_version.postgresql.version
storage_encrypted = true
deletion_protection = true
vpc_id = local.vpc_id
subnets = local.private_subnets
create_db_subnet_group = true
create_security_group = true
copy_tags_to_snapshot = true
master_username = local.aurora_master_username
manage_master_user_password = true
iam_database_authentication_enabled = true
monitoring_interval = 60
serverlessv2_scaling_configuration = {
min_capacity = 1
max_capacity = 2
}
instance_class = "db.serverless"
instances = {
one = {}
}
tags = local.tags
}
resource "aws_ecr_repository" "this" {
name = local.project
image_tag_mutability = "MUTABLE"
image_scanning_configuration {
scan_on_push = true
}
encryption_configuration {
encryption_type = "KMS"
}
tags = local.tags
}
# Build stage: install the Python dependencies with Poetry into an in-project virtualenv
FROM python:3.11-buster as builder

RUN apt-get -y update && apt-get -y upgrade && apt-get install -y --no-install-recommends

# hadolint ignore=DL3042
RUN pip install poetry==1.6

ENV POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_IN_PROJECT=1 \
    POETRY_VIRTUALENVS_CREATE=1 \
    POETRY_CACHE_DIR=/tmp/poetry_cache

WORKDIR /app

COPY ./ansible/pyproject.toml ./ansible/poetry.lock ./

RUN --mount=type=cache,target=$POETRY_CACHE_DIR poetry install --no-root

# Runtime stage: copy only the virtualenv and the playbook into a slim image
FROM python:3.11-slim-buster as runtime

WORKDIR /app

ENV VIRTUAL_ENV=/app/.venv
ENV PATH="/app/.venv/bin:$PATH"

COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY ./ansible/ansible.yml ./ansible.yml

# Run the playbook against localhost when the container starts
ENTRYPOINT ["ansible-playbook", "-i", "localhost", "ansible.yml"]
module "container_definition" {
source = "cloudposse/ecs-container-definition/aws"
version = "v0.60.0"
container_name = local.project
container_image = local.ecs.container_image
log_configuration = {
logDriver = "awslogs"
options = {
"awslogs-group" = aws_cloudwatch_log_group.this.name
"awslogs-region" = local.region
"awslogs-stream-prefix" = "ecs"
}
}
}
resource "aws_ecs_task_definition" "this" {
family = local.project
container_definitions = module.container_definition.json_map_encoded_list
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = local.context[terraform.workspace].ecs.cpu
memory = local.context[terraform.workspace].ecs.memory
task_role_arn = aws_iam_role.task.arn
execution_role_arn = aws_iam_role.exec.arn
tags = local.tags
}
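The Lambda template further down launches this task definition on an ECS cluster referenced as local.cluster_name, which is not part of this gist. A minimal sketch, assuming the cluster is not already defined elsewhere in the configuration:

# Hypothetical cluster definition; the Lambda trigger below runs the Ansible task on it.
resource "aws_ecs_cluster" "this" {
  name = local.cluster_name
  tags = local.tags
}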
data "archive_file" "lambda" {
type = "zip"
output_path = "${path.module}/lambda_function_payload.zip"
source {
content = templatefile("${path.module}/scripts/lambda.tftpl", {
cluster_name = local.cluster_name
subnets = jsonencode(local.private_subnets),
security_group_ids = jsonencode([aws_security_group.this.id])
task_definition_name = aws_ecs_task_definition.this.arn
})
filename = "lambda.js"
}
}
data "aws_iam_policy_document" "lambda" {
statement {
effect = "Allow"
actions = ["ecs:runTask"]
resources = [aws_ecs_task_definition.this.arn]
}
statement {
effect = "Allow"
actions = ["iam:PassRole"]
resources = [aws_iam_role.exec.arn, aws_iam_role.task.arn]
}
}
module "ansible_trigger" {
source = "terraform-aws-modules/lambda/aws"
version = "~>v6.0.0"
description = "Trigger to run an ecs task"
handler = "lambda.handler"
runtime = "nodejs18.x"
function_name = local.project
create_package = false
local_existing_package = data.archive_file.lambda.output_path
attach_policy_json = true
policy_json = data.aws_iam_policy_document.lambda.json
tags = merge(local.tags, { "Name" : local.project })
}
const { ECSClient, RunTaskCommand } = require("@aws-sdk/client-ecs");

const client = new ECSClient({ region: "us-east-1" });

const params = {
  cluster: "${cluster_name}",
  enableECSManagedTags: true,
  launchType: "FARGATE",
  networkConfiguration: {
    awsvpcConfiguration: {
      subnets: ${subnets},
      securityGroups: ${security_group_ids},
      assignPublicIp: "DISABLED",
    },
  },
  taskDefinition: "${task_definition_name}",
};

exports.handler = async (event) => {
  console.log("Triggering task");
  const command = new RunTaskCommand(params);
  await client.send(command);
  console.log("End of trigger - Check ECS Tasks to get status");
};
data "aws_iam_policy_document" "ansible_access" {
statement {
effect = "Allow"
actions = ["secretsmanager:ListSecrets"]
resources = ["*"]
}
statement {
effect = "Allow"
actions = [
"secretsmanager:GetResourcePolicy",
"secretsmanager:GetSecretValue",
"secretsmanager:DescribeSecret",
"secretsmanager:ListSecretVersionIds"
]
resources = [ module.aurora_postgresql_v2.cluster_master_user_secret[0].secret_arn ]
}
statement {
effect = "Allow"
actions = ["rds:DescribeDBClusters", "rds:ListTagsForResource"]
resources = ["*"]
}
}
resource "aws_iam_policy" "ansible_access" {
path = "/${local.project}/"
name = "${local.project}-ansible-permissions"
policy = data.aws_iam_policy_document.ansible_access.json
tags = local.tags
}
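For the playbook to read the master-user secret and describe the cluster, this policy is assumed to be attached to the ECS task role (aws_iam_role.task, referenced above but defined outside this gist). A minimal sketch of that attachment, if it is not already handled elsewhere:

# Hypothetical attachment: binds the Ansible access policy to the task role
# used by the task definition above.
resource "aws_iam_role_policy_attachment" "ansible_access" {
  role       = aws_iam_role.task.name
  policy_arn = aws_iam_policy.ansible_access.arn
}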
---
- name: Configuration of Aurora Serverless Postgres
  hosts: localhost
  vars:
    cluster_name: rds-cluster-name
    database_name: database-name
    app_user_name: application-user
    aws_iam_role: rds_iam
  tasks:
    - name: Retrieve database info
      amazon.aws.rds_cluster_info:
        cluster_name: '{{ cluster_name }}'
      register: _clusters

    - name: Get Cluster Instance
      ansible.builtin.set_fact:
        _cluster_info: '{{ _clusters.clusters | first }}'

    - name: Get Secret key path
      ansible.builtin.set_fact:
        secret_path: '{{ _cluster_info.master_user_secret.secret_arn | regex_search("(rds!cluster-.*(?=-[^-]*$))") }}'

    - name: Retrieve database credentials
      ansible.builtin.set_fact:
        db_credentials: "{{ lookup('amazon.aws.aws_secret', secret_path) }}"

    - name: Create a new database
      community.postgresql.postgresql_db:
        name: '{{ database_name }}'
        login_user: '{{ db_credentials.username }}'
        login_password: '{{ db_credentials.password }}'
        login_host: '{{ _cluster_info.endpoint }}'

    - name: Connect to database, create app user
      community.postgresql.postgresql_user:
        db: '{{ database_name }}'
        name: '{{ app_user_name }}'
        login_user: '{{ db_credentials.username }}'
        login_password: '{{ db_credentials.password }}'
        login_host: '{{ _cluster_info.endpoint }}'

    - name: Grant user access to database
      community.postgresql.postgresql_privs:
        db: '{{ database_name }}'
        privs: ALL
        type: database
        role: '{{ app_user_name }}'
        login_user: '{{ db_credentials.username }}'
        login_password: '{{ db_credentials.password }}'
        login_host: '{{ _cluster_info.endpoint }}'

    - name: Allow IAM connection from user
      community.postgresql.postgresql_membership:
        group: '{{ aws_iam_role }}'
        target_roles:
          - '{{ app_user_name }}'
        state: present
        db: '{{ database_name }}'
        login_user: '{{ db_credentials.username }}'
        login_password: '{{ db_credentials.password }}'
        login_host: '{{ _cluster_info.endpoint }}'
        fail_on_role: false
data "aws_iam_policy_document" "rds_iam" {
statement {
effect = "Allow"
actions = ["rds-db:connect"]
resources = ["arn:aws:rds-db:${local.region}:${data.aws_caller_identity.this.account_id}:dbuser:${module.aurora_postgresql_v2.cluster_resource_id}/${local.database.user}"]
}
}
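This document only grants rds-db:connect; to take effect it still has to become a managed policy attached to whatever role the application assumes, which is outside this gist. A minimal sketch, with a hypothetical policy name:

# Hypothetical wrapper: turns the rds-db:connect document into a managed policy.
# Attach it to the application's IAM role so the app user can log in with IAM auth.
resource "aws_iam_policy" "rds_iam" {
  name   = "${local.project}-rds-iam-connect"
  policy = data.aws_iam_policy_document.rds_iam.json
  tags   = local.tags
}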
name: Run Ansible - Manual Trigger
on:
  workflow_dispatch:
    inputs:
      LAMBDA_NAME:
        required: true
        type: choice
        description: Lambda to trigger
        options:
          - ansible-trigger-stg
          - ansible-trigger-prd
      ENVIRONMENT:
        required: true
        description: Environment to use
        type: environment

jobs:
  invoke-lambda:
    runs-on: ubuntu-latest
    environment: ${{ inputs.ENVIRONMENT }}
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: ${{ secrets.ANSIBLE_ASSUME_ROLE }}
          aws-region: us-east-1
          mask-aws-account-id: yes
      - name: Invoke lambda function
        id: lambda
        run: |
          aws lambda invoke --function-name ${{ inputs.LAMBDA_NAME }} response.json
          cat response.json
          grep -q 'error' response.json && exit 1 || exit 0
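The role behind secrets.ANSIBLE_ASSUME_ROLE also needs permission to invoke the trigger function. A minimal sketch of that statement, assuming the lambda_function_arn output of the terraform-aws-modules/lambda module (the OIDC role itself is defined outside this gist):

# Hypothetical policy document for the GitHub Actions role (ANSIBLE_ASSUME_ROLE):
# it only needs to invoke the trigger Lambda created above.
data "aws_iam_policy_document" "invoke_ansible_trigger" {
  statement {
    effect    = "Allow"
    actions   = ["lambda:InvokeFunction"]
    resources = [module.ansible_trigger.lambda_function_arn]
  }
}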