import * as path from "path";
import * as aws from "@pulumi/aws"; // Classic LTS support
// https://www.pulumi.com/docs/guides/crosswalk/aws/vpc
import * as awsx from "@pulumi/awsx"; // Crosswalk, contains sensible defaults
// import { getAccountId } from "@pulumi/aws-native"; // Provides some helpful aws methods.
import * as eks from "@pulumi/eks";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi"; // needed below for interpolate/all
require("dotenv").config({ path: path.join(__dirname, "pulumi.env") });
// ===
// Declarations
// ===
const defaultTags = {
  Automate: "true",
  Source: "grand-next/deploy/infra",
};
const accountId = ""; // "111111111111" <--- FIXME
if (accountId === "") throw new Error("Please set the accountId");
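// A possible alternative (sketch, untested): derive the account id from the
// current credentials instead of hard-coding it, e.g.
//   const accountId = aws.getCallerIdentity().then(id => id.accountId);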
// ===
// Runtime
// ===
// Standup the VPC
const vpc = new awsx.ec2.Vpc("grand-vpc", {
  cidrBlock: "10.1.0.0/24",
  numberOfAvailabilityZones: 3,
  numberOfNatGateways: 0,
  assignGeneratedIpv6CidrBlock: false,
  subnets: [{ type: "public", name: "public" }, { type: "private", name: "private" }],
  // Spread rather than Object.assign so defaultTags is not mutated on each use
  tags: { ...defaultTags, Name: "grand-vpc" },
});
// Standup the EKS deployment
const grandEks = new eks.Cluster("grand-eks", {
  clusterTags: { ...defaultTags, Name: "grand-eks" },
  // clusterSecurityGroup: {}
  // nodeSecurityGroup: {}
  vpcId: vpc.id,
  subnetIds: vpc.privateSubnetIds, // matches the publicSubnetIds accessor used below
  // TODO, will need to implement this to support ssm for nodes
  // if we want to move off of the base-line amis
  // ----
  // nodeUserData : new Error("Not implemented"),
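  // A possible sketch for that TODO (hypothetical, untested): user data that
  // installs and starts the SSM agent on a custom AMI, using the rpm URL AWS
  // documents for manual installs.
  // nodeUserData: `#!/bin/bash
  // yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
  // systemctl enable --now amazon-ssm-agent`,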
  enabledClusterLogTypes: ["audit"],
  encryptRootBlockDevice: true,
  instanceType: "t2.medium",
  kubernetesServiceIpAddressRange: "172.16.0.0/16",
  tags: { ...defaultTags, Name: "eks" },
});
// Build a node group that allows for finer tuning of
// worker resources for the EKS cluster.
// The main decision we're making here is the use of spot instances
// within our worker fleet.
// Source: https://www.pulumi.com/registry/packages/aws/api-docs/eks/nodegroup/
const nodeGroup = new aws.eks.NodeGroup("spot", {
  clusterName: grandEks.eksCluster.name,
  nodeRoleArn: grandEks.instanceRoles[0].arn,
  capacityType: "SPOT",
  scalingConfig: {
    desiredSize: 5,
    minSize: 1,
    maxSize: 10,
  },
  subnetIds: vpc.publicSubnetIds,
});
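// Spot nodes can be reclaimed at any time. If only interruption-tolerant
// workloads should land on them, a possible follow-up (sketch, untested) is a
// taint via the node group's `taints` input, or after the fact with:
//   kubectl taint nodes -l eks.amazonaws.com/capacityType=SPOT spot=true:NoSchedule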
const repos: aws.ecr.Repository[] = [];
// Build a container repository to hold our image builds
for (const name of ["grand_www", "grand_api"]) {
  const repository = new aws.ecr.Repository(`repository_${name}`, {
    name,
    tags: { ...defaultTags, Name: name },
  });
  repos.push(repository);
  new aws.ecr.RepositoryPolicy(`repository_policy_${name}`, {
    repository: repository.id,
    policy: JSON.stringify({
      Version: "2012-10-17",
      Statement: [{
        // Sids must be alphanumeric; "new policy" is rejected
        Sid: "AllowPushPull",
        Effect: "Allow",
        // NB: "*" opens the repository to any principal; tighten for production
        Principal: "*",
        Action: [
          "ecr:GetDownloadUrlForLayer",
          "ecr:BatchGetImage",
          "ecr:BatchCheckLayerAvailability",
          "ecr:PutImage",
          "ecr:InitiateLayerUpload",
          "ecr:UploadLayerPart",
          "ecr:CompleteLayerUpload",
          "ecr:DescribeRepositories",
          "ecr:GetRepositoryPolicy",
          "ecr:ListImages",
          "ecr:DeleteRepository",
          "ecr:BatchDeleteImage",
          "ecr:SetRepositoryPolicy",
          "ecr:DeleteRepositoryPolicy",
        ],
      }],
    }),
  });
  new aws.ecr.LifecyclePolicy(`mylifecyclepolicy_${name}`, {
    repository: repository.id,
    policy: JSON.stringify({
      rules: [{
        rulePriority: 1,
        description: "Expire untagged images older than 90 days",
        selection: {
          tagStatus: "untagged",
          countType: "sinceImagePushed",
          countUnit: "days",
          countNumber: 90,
        },
        action: {
          type: "expire",
        },
      }],
    }),
  });
}
// Build a credential that can read/write to the newly created repositories
const repositoryUser = new aws.iam.User("repositoryUser", {
  path: "/system/",
  tags: { ...defaultTags, Name: "repositoryUser" },
});
const accessKey = new aws.iam.AccessKey("repositoryUser", { user: repositoryUser.name });
// NB: repository.arn is an Output and is itself a full ARN, so it cannot be
// dropped into a plain template string; build the ARNs from the repo names.
const resourceDef = repos.map(repository =>
  pulumi.interpolate`arn:aws:ecr:*:${accountId}:repository/${repository.name}`);
const _repositoryUserPolicy = new aws.iam.UserPolicy("repositoryUser", {
  user: repositoryUser.name,
  // Outputs cannot be JSON.stringify'd directly; resolve them first.
  policy: pulumi.all(resourceDef).apply(resources => JSON.stringify({
    Version: "2012-10-17",
    Statement: [
      {
        Sid: "AllowEcrActions",
        Effect: "Allow",
        Action: [
          "ecr:GetRegistryPolicy",
          "ecr:DescribeRegistry",
          "ecr:DescribePullThroughCacheRules",
          "ecr:GetAuthorizationToken",
          "ecr:PutRegistryScanningConfiguration",
          "ecr:DeleteRegistryPolicy",
          "ecr:CreatePullThroughCacheRule",
          "ecr:DeletePullThroughCacheRule",
          "ecr:PutRegistryPolicy",
          "ecr:GetRegistryScanningConfiguration",
          "ecr:PutReplicationConfiguration",
        ],
        // These are registry-level actions (e.g. GetAuthorizationToken) and
        // are not scoped to repository ARNs.
        Resource: "*",
      },
      {
        Sid: "ECRWildcard",
        Effect: "Allow",
        Action: "ecr:*",
        Resource: resources,
      },
    ],
  })),
});
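// In CI these credentials would typically be used to log docker into ECR,
// along these lines (region assumed for illustration):
//   aws ecr get-login-password --region us-east-1 \
//     | docker login --username AWS --password-stdin <accountId>.dkr.ecr.us-east-1.amazonaws.com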
// ===
// Helm
// ===
/**
 * Note: the below is left in place for reference. This template currently deploys,
 * but regardless of the timeout the release never reports success, which triggers
 * helm's `atomic` rollback feature and returns it to the pre-deployment state.
 * We have an open conversation with Pulumi to discuss; in the meantime we
 * recommend running the helm chart directly.
 */
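// For reference, the direct invocation would look roughly like this (flags
// mirror the Release below; release name and chart path assumed):
//   helm install temp ./temporal --atomic --timeout 6000s \
//     --set server.replicaCount=1 \
//     --set cassandra.config.cluster_size=1 \
//     --set prometheus.enabled=true --set grafana.enabled=true \
//     --set elasticsearch.enabled=false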
const _temporal = new k8s.helm.v3.Release("temp", {
  chart: "./temporal",
  version: "1.0.0",
  atomic: true,
  skipAwait: true,
  timeout: 6000,
  values: {
    server: {
      replicaCount: 1,
    },
    cassandra: {
      config: {
        cluster_size: 1,
      },
    },
    prometheus: {
      enabled: true,
    },
    grafana: { enabled: true },
    elasticsearch: { enabled: false }, // was "elasticearch"; the misspelled key was silently ignored
  },
});
// ===
// Outputs
// ===
export const vpcId = vpc.id;
// These will need to be added to GitHub secrets
export const accessKeyOutput = accessKey.id;
export const secretAccessKeyOutput = accessKey.secret;
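// After `pulumi up`, read these with e.g.:
//   pulumi stack output accessKeyOutput
//   pulumi stack output secretAccessKeyOutput --show-secrets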