# Source: GitHub gist by @TechnicalMercenary, created November 9, 2018
# https://gist.github.com/TechnicalMercenary/1fe4dba881a9fd5d5b61fc1769e30995
# Static tag entries appended to the dynamically-generated tag list for the
# worker-node AutoScaling group (see aws_autoscaling_group.eks below).
locals {
  extra-tags = [
    {
      key                 = "Name"
      value               = "${var.env-key}-node"
      propagate_at_launch = true
    },
    {
      # Required so EKS/Kubernetes can identify the nodes that belong to this cluster.
      key                 = "kubernetes.io/cluster/${var.env-key}"
      value               = "owned"
      propagate_at_launch = true
    },
    {
      key                 = "tf-resource"
      value               = "aws_autoscaling_group.eks"
      propagate_at_launch = true
    }
  ]
}
# Worker-node AutoScaling Group. Its tag list is built by concatenating the
# per-entry tag maps generated from var.tag-map (data.null_data_source below)
# with the static entries in local.extra-tags.
resource "aws_autoscaling_group" "eks" {
name = "${var.env-key}"
desired_capacity = "${var.autoscale-capacity}"
launch_configuration = "${aws_launch_configuration.eks.id}"
min_size = "${var.autoscale-min}"
max_size = "${var.autoscale-max}"
# Spread nodes across the subnets created below (one per availability zone).
vpc_zone_identifier = ["${aws_subnet.eks.*.id}"]
# Terraform 0.11 flattens the nested list produced by wrapping concat() in [ ].
tags = ["${concat(
data.null_data_source.autoscaling-tags.*.outputs,
local.extra-tags)
}"]
}
# Convert the Tag Map into tags for the Autoscaling group
# One null_data_source instance per entry in var.tag-map; each instance's
# "outputs" map takes the {key, value, propagate_at_launch} shape that the
# aws_autoscaling_group tags attribute expects.
data "null_data_source" "autoscaling-tags" {
count = "${length(var.tag-map)}"
inputs = {
# keys()/values() both return entries in sorted-key order, so the same
# count.index pairs each key with its corresponding value.
key = "${ element(keys(var.tag-map), count.index) }"
value = "${ element(values(var.tag-map), count.index) }"
propagate_at_launch = "true"
}
}
#
# EKS Worker Nodes Resources
# * Data source to fetch latest EKS worker AMI
# * AutoScaling Launch Configuration to configure worker instances
# * AutoScaling Group to launch worker instances
#
# NOTE(review): this currently selects the most recent custom Paxata-owned
# AMI ("kubernetes-node*") instead of the official Amazon EKS worker AMI --
# the commented-out lines show the Amazon alternative. Confirm this is
# intentional before reuse.
data "aws_ami" "eks-worker" {
filter {
name = "name"
# values = ["amazon-eks-node-*"]
values = ["kubernetes-node*"]
}
most_recent = true
owners = ["011447054295"] # Paxata
# owners = ["602401143452"] # Amazon
}
#
# EKS Cluster Resources
# * EKS Cluster
#
# The EKS control plane. The explicit depends_on ensures the cluster role has
# its managed policies attached before AWS creates the cluster.
resource "aws_eks_cluster" "eks" {
name = "${var.env-key}"
role_arn = "${aws_iam_role.cluster.arn}"
vpc_config {
security_group_ids = ["${aws_security_group.cluster.id}"]
subnet_ids = ["${aws_subnet.eks.*.id}"]
}
depends_on = [
"aws_iam_role_policy_attachment.cluster-policy",
"aws_iam_role_policy_attachment.service-policy",
]
}
#
# EKS Cluster Resources
# * IAM Role to allow EKS service to manage other AWS services
# The trust (assume-role) policy permits the EKS service itself to assume
# this role.
resource "aws_iam_role" "cluster" {
name = "${var.env-key}-cluster"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
# AWS-managed policies required by the EKS control-plane role.
resource "aws_iam_role_policy_attachment" "cluster-policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.cluster.name}"
}
resource "aws_iam_role_policy_attachment" "service-policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.cluster.name}"
}
#
# EKS Worker Nodes Resources
# * IAM role allowing Kubernetes actions to access other AWS services
# The trust policy permits EC2 instances (the worker nodes) to assume this
# role via the instance profile defined below.
resource "aws_iam_role" "node" {
name = "${var.env-key}-node"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
# AWS-managed policies required on worker nodes: node registration with the
# cluster, VPC CNI pod networking, and read-only image pulls from ECR --
# plus the instance profile that attaches the node role to EC2 instances.
resource "aws_iam_role_policy_attachment" "node-policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.node.name}"
}
resource "aws_iam_role_policy_attachment" "node-cni-policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.node.name}"
}
resource "aws_iam_role_policy_attachment" "node-ec2-registry-readonly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.node.name}"
}
resource "aws_iam_instance_profile" "node" {
name = "${var.env-key}"
role = "${aws_iam_role.node.name}"
}
# Launch Configuration for the worker-node ASG. name_prefix together with
# create_before_destroy lets Terraform roll out a replacement configuration
# while the ASG still references the old one.
resource "aws_launch_configuration" "eks" {
associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.node.name}"
image_id = "${data.aws_ami.eks-worker.id}"
instance_type = "${var.node-instance-type}"
name_prefix = "${var.env-key}-"
key_name = "${var.aws-key-pair}"
security_groups = ["${aws_security_group.node.id}"]
# Bootstrap script (local.node-userdata, defined below) must be base64-encoded.
user_data_base64 = "${base64encode(local.node-userdata)}"
lifecycle {
create_before_destroy = true
}
# Tags N/A
}
# Look up this workstation's external IP; used below to open the cluster API
# server to this machine only (see cluster-ingress-workstation-https).
module "wfh" {
  source = "../module/workstation-external-ip"
}
#
# Outputs
#
# Rendered Kubernetes artifacts:
# * config_map_aws_auth - aws-auth ConfigMap that authorizes instances using
#   the node IAM role to join the cluster as worker nodes.
# * kubeconfig - kubectl client config; authentication is delegated to the
#   aws-iam-authenticator binary.
locals {
config_map_aws_auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
data:
mapRoles: |
- rolearn: ${aws_iam_role.node.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
CONFIGMAPAWSAUTH
kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:
server: ${aws_eks_cluster.eks.endpoint}
certificate-authority-data: ${aws_eks_cluster.eks.certificate_authority.0.data}
name: ${var.env-key}
contexts:
- context:
cluster: ${var.env-key}
user: aws
name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: aws-iam-authenticator
args:
- "token"
- "-i"
- "${var.env-key}"
KUBECONFIG
}
# Expose both rendered Kubernetes artifacts so callers can pipe them to kubectl.
output "config_map_aws_auth" {
  description = "aws-auth ConfigMap that authorizes worker nodes to join the cluster"
  value       = "${local.config_map_aws_auth}"
}

output "kubeconfig" {
  description = "kubeconfig for accessing the EKS cluster via aws-iam-authenticator"
  value       = "${local.kubeconfig}"
}
# Also write both rendered artifacts to disk, where external tooling picks
# them up from the kubernetes/ directory.
resource "local_file" "config_map_aws_auth" {
content = "${local.config_map_aws_auth}"
filename = "../../kubernetes/config-map-aws-auth.yml"
}
resource "local_file" "kubeconfig" {
content = "${local.kubeconfig}"
filename = "../../kubernetes/kubeconfig.yml"
}
#
# Provider Configuration
#
# All resources are created in the externally-supplied region (var.region-id).
provider "aws" {
region = "${var.region-id}"
}
# Automatically generated file - DO NOT EDIT!
# Ensure that this file is excluded from our source control!
# File generated by infrastructure/bin/lib/aws/terraform-remote-config.bash
# Remote state stored in S3 so runs from different machines share one state.
terraform {
backend "s3" {
bucket = "tfstate.paxata-developer.devops.paxata.com"
key = "pfried/eks.tfstate"
region = "us-west-2"
}
}
#
# EKS Cluster Resources
# * EC2 Security Group to allow networking traffic with EKS cluster
resource "aws_security_group" "cluster" {
name = "${var.env-key}-cluster"
description = "Cluster communication with worker nodes"
vpc_id = "${aws_vpc.eks.id}"
# Allow all outbound traffic; inbound rules are attached separately below.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "tf-resource", "aws_security_group.cluster"))}"
# tags {
# Name = "terraform-eks-demo"
# }
}
# Nodes -> control plane: pods may reach the Kubernetes API server over HTTPS.
resource "aws_security_group_rule" "cluster-ingress-node-https" {
  type                     = "ingress"
  description              = "Allow pods to communicate with the cluster API Server"
  protocol                 = "tcp"
  from_port                = 443
  to_port                  = 443
  security_group_id        = "${aws_security_group.cluster.id}"
  source_security_group_id = "${aws_security_group.node.id}"
}
# Workstation -> control plane: open the API server to this machine's IP only
# (a /32 built from the module.wfh lookup).
resource "aws_security_group_rule" "cluster-ingress-workstation-https" {
  type              = "ingress"
  description       = "Allow workstation to communicate with the cluster API Server"
  protocol          = "tcp"
  from_port         = 443
  to_port           = 443
  security_group_id = "${aws_security_group.cluster.id}"
  cidr_blocks       = ["${module.wfh.workstation-external-ip}/32"]
}
#
# EKS Worker Nodes Resources
# * EC2 Security Group to allow networking traffic
resource "aws_security_group" "node" {
name = "${var.env-key}-node"
description = "Security group for all nodes in the cluster"
vpc_id = "${aws_vpc.eks.id}"
# Allow all outbound traffic; inbound rules are attached separately below.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# The kubernetes.io/cluster/<name> = owned tag marks this group as belonging
# to the cluster so Kubernetes can manage it.
tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "kubernetes.io/cluster/${var.env-key}", "owned", "tf-resource", "aws_security_group.node"))}"
# tags = "${
# map(
# "Name", "terraform-eks-demo-node",
# "kubernetes.io/cluster/${var.env-key}", "owned",
# )
# }"
}
# Node <-> node: unrestricted traffic between workers on all ports/protocols.
resource "aws_security_group_rule" "node-ingress-self" {
  type                     = "ingress"
  description              = "Allow node to communicate with each other"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = "${aws_security_group.node.id}"
  source_security_group_id = "${aws_security_group.node.id}"
}
# Control plane -> nodes: kubelet and pod traffic on the high port range
# (1025-65535), matching the AWS EKS worker-node security group guidance.
resource "aws_security_group_rule" "node-ingress-cluster" {
  type                     = "ingress"
  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
  protocol                 = "tcp"
  from_port                = 1025
  to_port                  = 65535
  security_group_id        = "${aws_security_group.node.id}"
  source_security_group_id = "${aws_security_group.cluster.id}"
}
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
# More information: https://aws.amazon.com/blogs/opensource/improvements-eks-worker-node-provisioning/
# This is for the AMI to amazon-eks-node-vXX
# The script passes the cluster endpoint, CA bundle, and cluster name to the
# AMI's /etc/eks/bootstrap.sh so the node can register with the cluster.
locals {
node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.eks.endpoint}' --b64-cluster-ca '${aws_eks_cluster.eks.certificate_authority.0.data}' '${var.env-key}'
USERDATA
}
### Application Identification and Description variables
# for AWS the value must be lowercase
variable "env-key" {
type = "string"
description = "The Customer Id appended with the Cluster Id."
#No Default. External process is required to set this. I want an error or block if this is not set.
}
#
variable "tag-map" {
type = "map"
description = "A Pre-generated Map of all the Tags to set"
#No Default. External process is required to set this. I want an error or block if this is not set.
}
variable "region-id" {
  type        = "string"
  description = "The AWS region in which all resources are created, e.g. us-west-2."

  # No default: an external process is required to set this.
}

variable "aws-key-pair" {
  type        = "string"
  description = "Name of a pre-existing EC2 key pair used for SSH access to the worker nodes."

  # No default: an external process is required to set this.
}
#
# Variables Configuration
#
variable "node-instance-type" {
  type        = "string"
  description = "EC2 instance type for the EKS worker nodes."
  default     = "m4.large"
}

variable "autoscale-min" {
  type        = "string"
  description = "Minimum number of worker nodes in the AutoScaling group."
  default     = "1"
}

variable "autoscale-max" {
  type        = "string"
  description = "Maximum number of worker nodes in the AutoScaling group."
  default     = "2"
}

variable "autoscale-capacity" {
  type        = "string"
  description = "Desired number of worker nodes in the AutoScaling group."
  default     = "2"
}
#
# VPC Resources
# * VPC
# * Subnets
# * Internet Gateway
# * Route Table
#
resource "aws_vpc" "eks" {
cidr_block = "10.0.0.0/16"
# kubernetes.io/cluster/<name> = shared marks the VPC as usable by this
# cluster (and potentially others).
tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "kubernetes.io/cluster/${var.env-key}", "shared", "tf-resource", "aws_vpc.eks"))}"
# tags = "${
# map(
# "Name", "terraform-eks-demo-node",
# "kubernetes.io/cluster/${var.env-key}", "shared",
# )
# }"
}
# Find availability zones for the subnets to use
data "aws_availability_zones" "available" {}
# Two /24 subnets (10.0.0.0/24, 10.0.1.0/24), one per availability zone.
# NOTE(review): count is hard-coded to 2 and must stay in sync with
# aws_route_table_association.eks below.
resource "aws_subnet" "eks" {
count = 2
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
cidr_block = "10.0.${count.index}.0/24"
vpc_id = "${aws_vpc.eks.id}"
tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "kubernetes.io/cluster/${var.env-key}", "shared", "tf-resource", "aws_subnet.eks"))}"
# tags = "${
# map(
# "Name", "terraform-eks-demo-node",
# "kubernetes.io/cluster/${var.env-key}", "shared",
# )
# }"
}
# Internet gateway so the (public-IP) worker nodes can reach the internet.
resource "aws_internet_gateway" "eks" {
vpc_id = "${aws_vpc.eks.id}"
tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "tf-resource", "aws_internet_gateway.eks"))}"
# tags {
# Name = "terraform-eks-demo"
# }
}
# Route table sending all non-local traffic to the internet gateway.
resource "aws_route_table" "eks" {
  vpc_id = "${aws_vpc.eks.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.eks.id}"
  }

  # Tagged for consistency with every other taggable resource in this file.
  tags = "${merge(var.tag-map, map("Name", "${var.env-key}", "tf-resource", "aws_route_table.eks"))}"
}
# Attach the internet-facing route table to each subnet.
# NOTE(review): count is hard-coded to 2 and must stay in sync with the
# count on aws_subnet.eks above.
resource "aws_route_table_association" "eks" {
count = 2
subnet_id = "${aws_subnet.eks.*.id[count.index]}"
route_table_id = "${aws_route_table.eks.id}"
}
# End of gist.