Skip to content

Instantly share code, notes, and snippets.

@huksley
Last active November 27, 2023 17:28
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save huksley/746665004649c3ed3536fc0bd12650ec to your computer and use it in GitHub Desktop.
AWS ECS cluster with CDK for NextJS deployment
const { App, Stack } = require("aws-cdk-lib");
const iam = require("aws-cdk-lib/aws-iam");
const ecs = require("aws-cdk-lib/aws-ecs");
const ec2 = require("aws-cdk-lib/aws-ec2");
const ecr = require("aws-cdk-lib/aws-ecr");
const log = require("aws-cdk-lib/aws-logs");
const route53 = require("aws-cdk-lib/aws-route53");
const patterns = require("aws-cdk-lib/aws-ecs-patterns");
const cert = require("aws-cdk-lib/aws-certificatemanager");
const { Duration } = require("aws-cdk-lib");
// Uppercase the first character of a string, e.g. "dev" -> "Dev".
const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1);
/**
 * CDK stack that runs a NextJS container image as an ECS Fargate service
 * behind an HTTPS application load balancer, with Route53 DNS, an existing
 * log group, autoscaling (1-2 tasks) and both ALB and container-level
 * health checks against /api/health.
 *
 * Required environment: VPC_ID, ZONE_ID, ZONE_NAME.
 * Optional: GIT_TAGS selects the ECR image tag (defaults to "latest").
 */
class CdkStack extends Stack {
  /**
   * @param scope    CDK app scope
   * @param id       Stack id
   * @param props    Standard stack props (env account/region)
   * @param image    Name of the existing ECR repository with the NextJS image
   * @param stage    Deployment stage, used to derive resource names
   * @param hostname Fully qualified domain name served by the ALB
   */
  constructor(scope, id, props, image, stage, hostname) {
    super(scope, id, props);

    // Pre-existing IAM role, reused as both task role and execution role.
    const taskRole = iam.Role.fromRoleName(
      this,
      "ecs-task-role",
      "ecs-task-role"
    );

    // Deploy into an existing VPC rather than creating a new one.
    const vpc = ec2.Vpc.fromLookup(this, "vpc", {
      vpcId: process.env.VPC_ID
    });

    const cluster = new ecs.Cluster(this, "cluster", {
      vpc,
      enableFargateCapacityProviders: true
    });

    const repository = ecr.Repository.fromRepositoryName(this, "imageRepository", image);
    const logGroup = log.LogGroup.fromLogGroupName(this, "logs", stage + "-ecs");

    // NOTE: `taskRole` is not a top-level property of
    // ApplicationLoadBalancedFargateServiceProps — it belongs (and is set)
    // inside taskImageOptions below, so the previous top-level copy was
    // silently ignored and has been removed.
    const lb = new patterns.ApplicationLoadBalancedFargateService(this, "service", {
      cluster,
      cpu: 1024,
      desiredCount: 1,
      memoryLimitMiB: 2048,
      loadBalancerName: stage + "-lb",
      serviceName: stage + "-service",
      // Stop failed deployments early, but do not roll back automatically.
      circuitBreaker: { rollback: false },
      domainName: hostname,
      domainZone: route53.HostedZone.fromHostedZoneAttributes(this, "zone", {
        hostedZoneId: process.env.ZONE_ID,
        zoneName: process.env.ZONE_NAME
      }),
      listenerPort: 443,
      protocol: "HTTPS",
      taskImageOptions: {
        environment: {
          NEXTAUTH_URL: "https://" + hostname,
          GIT_TAGS: process.env.GIT_TAGS || "latest"
        },
        image: ecs.ContainerImage.fromEcrRepository(
          repository,
          process.env.GIT_TAGS || "latest"
        ),
        containerPort: 3000,
        executionRole: taskRole,
        taskRole,
        logDriver: ecs.LogDrivers.awsLogs({
          logGroup,
          streamPrefix: "app",
          mode: ecs.AwsLogDriverMode.NON_BLOCKING
        })
      }
    });

    lb.service.autoScaleTaskCount({ minCapacity: 1, maxCapacity: 2 });

    // Redirect plain HTTP to HTTPS at the load balancer.
    lb.loadBalancer.addRedirect({
      sourcePort: 80,
      sourceProtocol: "HTTP",
      targetPort: 443,
      targetProtocol: "HTTPS"
    });

    // https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#connection-idle-timeout
    lb.loadBalancer.setAttribute("idle_timeout.timeout_seconds", "300");

    lb.targetGroup.configureHealthCheck({
      path: "/api/health",
      healthyThresholdCount: 2,
      timeout: Duration.seconds(5),
      interval: Duration.seconds(10)
    });
    lb.targetGroup.setAttribute("deregistration_delay.timeout_seconds", "30");

    // FIXME: https://github.com/aws/aws-cdk/issues/20233
    // Reaches into the private `props` of the default container because the
    // pattern exposes no public way to set a container-level health check.
    lb.service.taskDefinition.defaultContainer.props.healthCheck = {
      command: ["CMD-SHELL", "curl -f http://localhost:3000/api/health || exit 1"],
      interval: Duration.seconds(10),
      retries: 3,
      timeout: Duration.seconds(5)
    };
  }
}
// Read a required deployment setting from the environment, failing fast with
// a clear message instead of a cryptic TypeError later (e.g. inside
// capitalize(undefined)).
const requireEnv = (name) => {
  const value = process.env[name];
  if (!value) {
    throw new Error("Missing required environment variable: " + name);
  }
  return value;
};

const stage = requireEnv("DEPLOY_STAGE");
const image = requireEnv("IMAGE_NAME");
const hostname = requireEnv("DEPLOY_HOSTNAME");

const app = new App();
new CdkStack(
  app,
  "App" + capitalize(stage),
  {
    // Account/region are injected by the CDK CLI environment.
    env: { account: process.env.CDK_DEFAULT_ACCOUNT, region: process.env.CDK_DEFAULT_REGION }
  },
  image,
  stage,
  hostname
);
#!/bin/sh
# Build the NextJS Docker image, push it to ECR, then hand off to CDK.
set -eu

# Resolve the AWS account id. Do NOT backslash-escape the quotes inside
# $(...): `--query \"Account\"` passes the literal JMESPath string
# "Account" (which evaluates to the constant text Account), not the
# Account field selector.
ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"

TIMESTAMP=$(date "+%Y%d%m%H%M")   # NOTE(review): %Y%d%m looks transposed (year-day-month) — confirm intent
GIT_TAGS=$(git describe --always --tags --dirty=-${DEPLOY_STAGE})-ci-${GITHUB_RUN_NUMBER:-$TIMESTAMP}
IMAGE_REPO=app
export NEXTAUTH_URL=${NEXTAUTH_URL:-https://app.example.com}

# Ensure the log group referenced by the CDK stack exists with retention set.
aws logs create-log-group --log-group-name ${IMAGE_REPO}-ecs
aws logs put-retention-policy --log-group-name ${IMAGE_REPO}-ecs --retention-in-days 30

# Log in to the ECR registry. Pipe the password directly (no echo, which can
# leak it and mangle it via word splitting), and use the registry host only —
# docker login takes a registry endpoint, not a repository path.
aws ecr get-login-password --region eu-west-1 \
  | docker login --password-stdin --username AWS ${ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com

docker buildx build --platform=linux/amd64 \
  --build-arg NEXTAUTH_URL=${NEXTAUTH_URL} \
  --build-arg AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
  --build-arg AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
  --target=runner \
  --load \
  --tag=${IMAGE_REPO}:${GIT_TAGS} --progress=plain .

# Local convenience tag; note that :latest is never pushed to ECR.
docker tag ${IMAGE_REPO}:${GIT_TAGS} ${IMAGE_REPO}:latest
echo "Pushing ${IMAGE_REPO}:${GIT_TAGS} as ${IMAGE_REPO}:${GIT_TAGS}"
docker tag ${IMAGE_REPO}:${GIT_TAGS} ${ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com/${IMAGE_REPO}:${GIT_TAGS}
docker push ${ACCOUNT_ID}.dkr.ecr.eu-west-1.amazonaws.com/${IMAGE_REPO}:${GIT_TAGS}

# NOTE(review): `npx cdk` with no command only prints usage — this likely
# needs `npx cdk deploy` (plus any --app/--require-approval flags); confirm.
npx cdk
#
# Multi-stage production NextJS image, based on the official example from
# https://nextjs.org/docs/deployment#docker-image
# See dockerfile: https://github.com/vercel/next.js/blob/canary/examples/with-docker/Dockerfile
#

# --- Stage 1: install dependencies only (cached npm ci) ---
FROM node:16-alpine AS deps
RUN apk add --no-cache libc6-compat git curl python3 make g++
WORKDIR /opt/app
RUN addgroup --system --gid 1001 app
RUN adduser --system --uid 1001 app -h /opt/app
USER app
COPY package.json package-lock.json ./
COPY tools/eslint ./tools/eslint
RUN --mount=type=cache,uid=1001,gid=1001,target=/opt/app/.npm NODE_OPTIONS=--max_old_space_size=2048 DISABLE_OPENCOLLECTIVE=true CI=1 NEXT_TELEMETRY_DISABLED=1 SLS_TELEMETRY_DISABLED=1 npm --no-audit --prefer-offline ci

# --- Stage 2: rebuild the source code only when needed ---
FROM node:16-alpine AS builder
# Generic setup
RUN apk add --no-cache libc6-compat git curl
WORKDIR /opt/app
RUN addgroup --system --gid 1001 app
RUN adduser --system --uid 1001 app -h /opt/app
USER app
COPY --from=deps /opt/app/node_modules ./node_modules
COPY --chown=app:app . .
ENV AWS_REGION=eu-west-1
ENV NEXT_TELEMETRY_DISABLED=1
ENV SLS_TELEMETRY_DISABLED=1
ENV SLS_NOTIFICATIONS_MODE=off
ENV NODE_ENV=production
# ARG defaults must use `=` — `ARG NAME value` is invalid Dockerfile syntax
# and fails the build ("ARG requires exactly one argument").
ARG NEXTAUTH_URL=http://localhost:3000
ARG AWS_DEFAULT_REGION=eu-west-1
# SECURITY NOTE(review): credentials passed as build args are recorded in the
# image build history (`docker history`) — prefer BuildKit --mount=type=secret.
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG AWS_SESSION_TOKEN
ARG GIT_TAGS=dev
RUN npx next build --debug

# --- Stage 3: production image, copy built artifacts and run next ---
FROM node:16-alpine AS runner
# Generic setup
RUN apk add --no-cache libc6-compat git curl
WORKDIR /opt/app
RUN addgroup --system --gid 1001 app
RUN adduser --system --uid 1001 app -h /opt/app
COPY --from=builder /opt/app/public ./public
COPY next.config.js .
COPY --from=builder /opt/app/package.json ./package.json
COPY --from=builder /opt/app/package-lock.json ./package-lock.json
COPY --from=builder --chown=app:app /opt/app/.next/standalone ./
COPY --from=builder --chown=app:app /opt/app/.next/static ./.next/static
COPY --from=builder --chown=app:app /opt/app/.next/server ./.next/server
# SECURITY NOTE(review): this bakes .env.local (typically secrets) into the
# shipped image — consider injecting configuration at runtime instead.
COPY --from=builder --chown=app:app /opt/app/.env.local ./
USER app
EXPOSE 3000
ENV PORT=3000
ENV AWS_REGION=eu-west-1
ENV NEXT_TELEMETRY_DISABLED=1
ENV SLS_TELEMETRY_DISABLED=1
ENV SLS_NOTIFICATIONS_MODE=off
ENV NODE_ENV=production
ENV GIT_TAGS=dev
ENV MEMORY=1536
ENV NEXTAUTH_URL=http://localhost:3000
HEALTHCHECK --interval=30s --timeout=5s CMD curl -f http://localhost:3000/api/health || exit 1
# Shell-form CMD so ${MEMORY:-1536} is expanded; `server` resolves to the
# standalone server.js copied from .next/standalone above.
CMD node --max-old-space-size=${MEMORY:-1536} server
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment