@matt-slalom
Created August 18, 2021 16:33
Pulumi Code Comparison: the same EKS cluster with a Windows node group, written first in Python and then in TypeScript.
import pulumi
import pulumi_eks as eks
import pulumi_aws as aws
import pulumi_kubernetes as k8s
cluster_name = 'matt-test-eks-cluster'
node_group_name = 'matt-test-eks-ng'
my_public_ip = "X.X.X.X/32" # Replace
global_tags = {
    "Owner": "Matt"
}
the_role = aws.iam.Role(
    "example",
    assume_role_policy="""{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "Service": "eks.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
            }
        ]
    }
    """)
eks_cluster_policy = aws.iam.RolePolicyAttachment(
    "example-AmazonEKSClusterPolicy",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
    role=the_role.name)

# Reference: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
eksvpc_resource_controller = aws.iam.RolePolicyAttachment(
    "example-AmazonEKSVPCResourceController",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
    role=the_role.name)
myVpc = aws.ec2.Vpc(
    resource_name='matt_test_vpc',
    enable_dns_support=True,
    enable_dns_hostnames=True,
    cidr_block="10.2.0.0/24",
    tags={
        "Name": "matt_test_vpc"
    })

myIgw = aws.ec2.InternetGateway(
    "matt-test-eks-igw",
    vpc_id=myVpc.id
)

myRouteTable = aws.ec2.RouteTable(
    "matt-test-eks-route-table",
    vpc_id=myVpc.id,
    routes=[
        {
            "cidr_block": "0.0.0.0/0",
            "gateway_id": myIgw.id
        }
    ]
)
public_subnet_ids = []
# Create public subnets that will be used for the AWS Load Balancer Controller
for zone, public_subnet_cidr in zip(['us-west-2a', 'us-west-2b'], ['10.2.0.0/25', '10.2.0.128/25']):
    public_subnet = aws.ec2.Subnet(
        f"matt-test-eks-public-subnet-{zone}",
        assign_ipv6_address_on_creation=False,
        vpc_id=myVpc.id,
        map_public_ip_on_launch=True,
        cidr_block=public_subnet_cidr,
        availability_zone=zone,
        tags={
            # Custom tags for subnets
            "Name": f"matt-test-eks-public-subnet-{zone}",
            "cluster_tag": "owned",
            "kubernetes.io/role/elb": "1",
        }
    )
    aws.ec2.RouteTableAssociation(
        f"matt-test-eks-public-rta-{zone}",
        route_table_id=myRouteTable.id,
        subnet_id=public_subnet.id,
    )
    public_subnet_ids.append(public_subnet.id)
my_security_group = aws.ec2.SecurityGroup(
    resource_name="matt-test-eks-secgrp",
    name="matt-test-eks-secgrp",
    ingress=[aws.ec2.SecurityGroupIngressArgs(
        from_port=0,
        to_port=0,
        protocol="-1",
        cidr_blocks=[my_public_ip, "10.0.0.0/8"]
    )],
    egress=[aws.ec2.SecurityGroupEgressArgs(
        from_port=0,
        to_port=0,
        protocol="-1",
        cidr_blocks=["0.0.0.0/0"]
    )],
    tags=global_tags,
    vpc_id=myVpc.id
)
worker_iam_role = aws.iam.Role(
    "aws-eks-worker-role",
    tags=global_tags,
    assume_role_policy={
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "Service": "ec2.amazonaws.com",
                },
                "Action": "sts:AssumeRole"
            }
        ]
    },
    managed_policy_arns=[
        "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
        "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
        "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
    ]
)
# Create a base EKS cluster
cluster = eks.Cluster(
    resource_name=cluster_name,
    name=cluster_name,
    tags=global_tags,
    vpc_id=myVpc.id,
    skip_default_node_group=False,
    node_group_options={
        "key_name": "matt-keypair",
        "desired_capacity": 2,
        "min_size": 1,
        "max_size": 2,
        "encrypt_root_block_device": True,
        "instance_type": "t3a.medium"
    },
    public_access_cidrs=[my_public_ip],
    # instance_role=worker_iam_role,
    endpoint_public_access=True,
    endpoint_private_access=True,
    public_subnet_ids=public_subnet_ids,
    create_oidc_provider=True,
    cluster_security_group_tags={"ClusterSecurityGroupTag": "true"},
    node_security_group_tags={"NodeSecurityGroupTag": "true"}
)
provider = k8s.Provider("provider", kubeconfig=cluster.kubeconfig)
# Windows NodeGroup
node_group = eks.NodeGroup(
    resource_name=node_group_name,
    cluster=cluster,
    # https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html
    # aws ssm get-parameter --name /aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-1.20/image_id --region us-west-2 --query "Parameter.Value" --output text
    # CloudFormation expects size >= 50GB for the Windows AMI
    ami_id="ami-092263943bcda23f5",
    node_root_volume_size=50,
    instance_type="m5.large",
    key_name="matt-keypair",
    desired_capacity=2,
    min_size=1,
    max_size=2,
    encrypt_root_block_device=True,
    instance_profile=aws.iam.InstanceProfile(
        node_group_name + "-windows",
        aws.iam.InstanceProfileArgs(role=worker_iam_role)
    ),
    opts=pulumi.ResourceOptions(
        providers={"kubernetes": cluster.provider, "provider": provider}
    ),
    cluster_ingress_rule=cluster.eks_cluster_ingress_rule,
    node_security_group=cluster.node_security_group,
    cloud_formation_tags={
        "CloudFormationGroupTag": "true",
        f"kubernetes.io/cluster/{cluster_name}": "owned",
        "k8s.io/cluster-autoscaler/enabled": "true",
        f"k8s.io/cluster-autoscaler/{cluster_name}": "true"
    },
    node_user_data_override="""<powershell>
[string]$EKSBinDir = "$env:ProgramFiles\\Amazon\\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\\$EKSBootstrapScriptName"
& $EKSBootstrapScriptFile -EKSClusterName "{cluster_name}" -KubeletExtraArgs "--node-labels=alpha.eksctl.io/cluster-name={cluster_name},alpha.eksctl.io/nodegroup-name={nodegroup_name} --register-with-taints=" 3>&1 4>&1 5>&1 6>&1
</powershell>""".format(cluster_name=cluster_name, nodegroup_name=node_group_name)
)
pulumi.export("kubeconfig", cluster.kubeconfig)
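
A possible variation on the Python version above (a sketch, not taken from the gist as written): instead of pinning ami_id to "ami-092263943bcda23f5", the EKS-optimized Windows AMI ID can be resolved from the SSM parameter shown in the NodeGroup comment, assuming pulumi_aws's aws.ssm.get_parameter and that the stack's AWS region is us-west-2.

# Sketch: look up the Windows EKS-optimized AMI from SSM instead of hard-coding it,
# using the parameter path from the "aws ssm get-parameter" comment above.
windows_ami = aws.ssm.get_parameter(
    name="/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-1.20/image_id"
)
# windows_ami.value could then be passed as ami_id= in the eks.NodeGroup.
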
"use strict";
import * as aws from "@pulumi/aws";
import * as k8s from "@pulumi/kubernetes";
import * as eks from "@pulumi/eks";
const clusterName = 'matt-test-eks-cluster'
const nodeGroupName = 'matt-test-eks-ng'
const myPublicIp = "x.x.x.x/32" //replace
const zip = (...arrays: any[]) => {
    const minLen = Math.min(...arrays.map(arr => arr.length));
    const [firstArr, ...restArrs] = arrays;
    return firstArr.slice(0, minLen).map(
        (val: any, i: string | number) => [val, ...restArrs.map(arr => arr[i])]
    );
}
const globalTags = {
    "Owner": "Matt"
}
const theRole = new aws.iam.Role("matt_eks_role", {
    name: "matt_eks_role",
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Action: "sts:AssumeRole",
            Effect: "Allow",
            Sid: "",
            Principal: {
                Service: "eks.amazonaws.com",
            },
        }],
    }),
    tags: globalTags,
});
const eksClusterPolicy = new aws.iam.RolePolicyAttachment("AmazonEKSClusterPolicy", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
    role: theRole.name
});
const eksvpcResourceController = new aws.iam.RolePolicyAttachment("AmazonEKSVPCResourceController", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
    role: theRole.name
});
const myVpc = new aws.ec2.Vpc("matt_test_vpc", {
    cidrBlock: "10.2.0.0/24",
    enableDnsHostnames: true,
    enableDnsSupport: true,
    // Spread instead of Object.assign(globalTags, ...) so the shared tag object is not mutated
    tags: {...globalTags, "Name": "matt_test_vpc"}
});
const myIgw = new aws.ec2.InternetGateway("matt-test-eks-igw", {
    vpcId: myVpc.id
});
const myRouteTable = new aws.ec2.RouteTable("matt-test-eks-route-table", {
    vpcId: myVpc.id,
    routes: [
        {
            cidrBlock: "0.0.0.0/0",
            gatewayId: myIgw.id
        }
    ]
});
let publicSubnetIds = [];
for (const [zone, publicSubnetCidr] of zip(['us-west-2a', 'us-west-2b'], ['10.2.0.0/25', '10.2.0.128/25'])) {
    const publicSubnet = new aws.ec2.Subnet(`matt-test-eks-public-subnet-${zone}`, {
        assignIpv6AddressOnCreation: false,
        vpcId: myVpc.id,
        mapPublicIpOnLaunch: true,
        availabilityZone: zone,
        // Spread so each subnet gets its own tag object instead of mutating globalTags
        tags: {
            ...globalTags,
            "Name": `matt-test-eks-public-subnet-${zone}`,
            "cluster_tag": "owned",
            "kubernetes.io/role/elb": "1",
        },
        cidrBlock: publicSubnetCidr
    });
    new aws.ec2.RouteTableAssociation(`matt-test-eks-public-rta-${zone}`, {
        routeTableId: myRouteTable.id,
        subnetId: publicSubnet.id,
    });
    publicSubnetIds.push(publicSubnet.id);
}
const mySecurityGroup = new aws.ec2.SecurityGroup("matt-test-eks-secgrp", {
    name: "matt-test-eks-secgrp",
    ingress: [{
        fromPort: 0,
        toPort: 0,
        protocol: "-1",
        cidrBlocks: [myPublicIp, "10.0.0.0/8"]
    }],
    egress: [{
        fromPort: 0,
        toPort: 0,
        protocol: "-1",
        cidrBlocks: ["0.0.0.0/0"]
    }],
    tags: globalTags,
    vpcId: myVpc.id
});
const myWorkerIamRole = new aws.iam.Role("aws-eks-worker-role", {
    name: "aws-eks-worker-role",
    tags: globalTags,
    managedPolicyArns: [
        "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
        "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
        "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
    ],
    assumeRolePolicy: {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "Service": "ec2.amazonaws.com",
                },
                "Action": "sts:AssumeRole"
            }
        ]
    }
});
// Base cluster with two Linux nodes to run cluster system workloads (e.g., DNS)
const cluster = new eks.Cluster(clusterName, {
    name: clusterName,
    tags: globalTags,
    vpcId: myVpc.id,
    skipDefaultNodeGroup: false,
    instanceRole: myWorkerIamRole,
    nodeGroupOptions: {
        keyName: "matt-keypair",
        desiredCapacity: 2,
        minSize: 1,
        maxSize: 2,
        encryptRootBlockDevice: true,
        instanceType: "t3a.medium"
    },
    publicAccessCidrs: [myPublicIp],
    endpointPublicAccess: true,
    endpointPrivateAccess: true,
    publicSubnetIds: publicSubnetIds,
    createOidcProvider: true,
    clusterSecurityGroupTags: {"ClusterSecurityGroupTag": "true"},
    nodeSecurityGroupTags: {"NodeSecurityGroupTag": "true"}
});
const provider = new k8s.Provider("provider", {kubeconfig: cluster.kubeconfig});
// Windows EKS nodes for running Windows containers
// VPC resource controller must already be configured before this step
const nodeGroup = new eks.NodeGroup(nodeGroupName, {
    cluster: cluster,
    amiId: "ami-092263943bcda23f5",
    nodeRootVolumeSize: 50,
    instanceType: "m5.large",
    keyName: "matt-keypair",
    desiredCapacity: 2,
    minSize: 1,
    maxSize: 2,
    encryptRootBlockDevice: true,
    instanceProfile: new aws.iam.InstanceProfile(nodeGroupName + "-standard", {role: myWorkerIamRole}),
    clusterIngressRule: cluster.eksClusterIngressRule,
    nodeSecurityGroup: cluster.nodeSecurityGroup,
    cloudFormationTags: cluster.core.cluster.name.apply(clusterName => ({
        "CloudFormationGroupTag": "true",
        "k8s.io/cluster-autoscaler/enabled": "true",
        [`kubernetes.io/cluster/${clusterName}`]: "owned",
        [`k8s.io/cluster-autoscaler/${clusterName}`]: "true",
    })),
    nodeUserDataOverride: `<powershell>
[string]$EKSBinDir = "$env:ProgramFiles\\Amazon\\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\\$EKSBootstrapScriptName"
& $EKSBootstrapScriptFile -EKSClusterName ${clusterName} -KubeletExtraArgs "--node-labels=alpha.eksctl.io/cluster-name=${clusterName},alpha.eksctl.io/nodegroup-name=${nodeGroupName}" 3>&1 4>&1 5>&1 6>&1
</powershell>`
}, {
    providers: {kubernetes: cluster.provider, provider: provider},
});
export const kubeconfig = cluster.kubeconfig;