resource "google_container_cluster" "_" {
name = var.cluster_name
location = local.location
min_master_version = var.k8s_version
# To deploy private clusters, uncomment the section below,
# and uncomment the block in network.tf.
# Private clusters require extra resources (Cloud NAT,
# router, network, subnet) and the quota for some of these
# resources is fairly low on GCP; so if you want to deploy
# a lot of private clusters (more than 10), you can use these
# blocks as a base but you will probably have to refactor
# things quite a bit (you will at least need to define a single
# shared router and use it across all the clusters).
network = google_compute_network._.name
subnetwork = google_compute_subnetwork._.name
/*
private_cluster_config {
enable_private_nodes = true
# This must be set to "false".
# Otherwise, access to the public endpoint is disabled,
# and we can't connect to the API server from outside.
enable_private_endpoint = false
# This must be set to a /28.
# I think it shouldn't collide with the pod network subnet.
master_ipv4_cidr_block = "10.255.255.0/28"
}
*/
# By default, Terraform creates clusters using the legacy "ROUTES"
# networking mode. It is better to use the newer "VPC_NATIVE"
# mode. It allows "container native load balancing" (sending
# connections directly to Pods instead of bouncing through a
# NodePort). I think it's also required (?) to interconnect
# cluster. It is also mandatory for private clusters.
# https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips
# https://cloud.google.com/kubernetes-engine/docs/concepts/ingress#container-native_load_balancing
networking_mode = "VPC_NATIVE"
# ip_allocation_policy is required for VPC_NATIVE clusters.
ip_allocation_policy {
# This is the block that will be used for pods.
cluster_ipv4_cidr_block = "10.0.0.0/12"
# The services block is optional
# (GKE will pick one automatically).
#services_ipv4_cidr_block = ""
}
# We won't use that node pool but we have to declare it anyway.
# It will remain empty so we don't have to worry about it.
node_pool {
name = "default"
}
lifecycle {
ignore_changes = [ node_pool ]
}
}
resource "google_compute_network" "_" {
name = var.cluster_name
# The default is to create subnets automatically.
# However, this creates one subnet per zone in all regions,
# which causes a quick exhaustion of the subnet quota.
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "_" {
name = var.cluster_name
ip_cidr_range = "10.254.0.0/16"
region = local.region
network = google_compute_network._.id
}
resource "google_compute_router" "_" {
name = var.cluster_name
region = local.region
network = google_compute_network._.name
}
resource "google_compute_router_nat" "_" {
name = var.cluster_name
router = google_compute_router._.name
region = local.region
# Everyone in the network is allowed to NAT out.
# (We would change this if we only wanted to allow specific subnets to NAT out.)
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
# Pick NAT addresses automatically.
# (We would change this if we wanted to use specific addresses to NAT out.)
nat_ip_allocate_option = "AUTO_ONLY"
}
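
# The two "We would change this if..." comments above correspond to real
# options on google_compute_router_nat. Here is a hypothetical, commented-out
# sketch of what that could look like: it only NATs the subnet defined above,
# and uses a manually reserved external address (the address resource below
# is made up purely for illustration).
/*
resource "google_compute_address" "nat" {
  name   = "${var.cluster_name}-nat"
  region = local.region
}

resource "google_compute_router_nat" "manual_example" {
  name   = "${var.cluster_name}-manual"
  router = google_compute_router._.name
  region = local.region
  # Only NAT out traffic coming from the listed subnetworks.
  source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
  subnetwork {
    name                    = google_compute_subnetwork._.id
    source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
  }
  # Use a specific, manually allocated external address.
  nat_ip_allocate_option = "MANUAL_ONLY"
  nat_ips                = [google_compute_address.nat.self_link]
}
*/
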
resource "google_container_node_pool" "ondemand" {
name = "ondemand"
cluster = google_container_cluster._.id
autoscaling {
min_node_count = 0
max_node_count = 5
}
node_config {
tags = concat(var.tags, [ "ondemand" ])
preemptible = true
machine_type = var.node_type
oauth_scopes = [
"logging-write", # part of the default scopes
"monitoring", # idem
"storage-ro", # allow this node to pull from GCR
]
}
}
resource "google_container_node_pool" "preemptible" {
name = "preemptible"
cluster = google_container_cluster._.id
initial_node_count = 1
autoscaling {
min_node_count = 1
max_node_count = 5
}
node_config {
tags = concat(var.tags, [ "preemptible" ])
preemptible = true
machine_type = var.node_type
}
}
resource "google_container_node_pool" "bignodes" {
name = "bignodes"
cluster = google_container_cluster._.id
autoscaling {
min_node_count = 0
max_node_count = 5
}
node_config {
tags = concat(var.tags, [ "preemptible" ])
preemptible = true
machine_type = "n2-standard-4"
labels = {
# There is already a bunch of labels on our nodes,
# including cloud.google.com/gke-nodepool={poolname}
# But let's add one for fun!
expensive = ""
}
taint {
key = "expensive"
value = ""
effect = "NO_SCHEDULE"
}
}
}
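
# The "expensive" taint above keeps ordinary workloads off these nodes; a pod
# needs a matching toleration to be scheduled there. Below is a hypothetical,
# commented-out illustration using the Terraform kubernetes provider (which is
# not configured in this gist); the same toleration can just as well be
# expressed directly in a pod manifest.
/*
resource "kubernetes_pod" "big_workload_example" {
  metadata {
    name = "big-workload-example"
  }
  spec {
    container {
      name  = "main"
      image = "nginx"
    }
    # Tolerate the taint set on the "bignodes" pool...
    toleration {
      key      = "expensive"
      operator = "Exists"
      effect   = "NoSchedule"
    }
    # ...and explicitly target that pool, using the label mentioned above.
    node_selector = {
      "cloud.google.com/gke-nodepool" = "bignodes"
    }
  }
}
*/
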
variable "cluster_name" {
type = string
default = "klstr"
}
variable "tags" {
type = list(string)
default = []
}
# To view supported machine types, run:
# gcloud compute machine-types list --zones=europe-north1-a
# (--zones is not strictly necessary, but makes for a shorter output)
variable "node_type" {
  type    = string
  default = "e2-medium"
}

variable "min_nodes_per_pool" {
type = number
default = 2
}
variable "max_nodes_per_pool" {
type = number
default = 5
}
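
# Note: the node pools above hard-code their own min/max node counts, so these
# two variables don't seem to be referenced by them. If you want the variables
# to take effect, a pool's autoscaling block could read them instead, e.g.:
/*
  autoscaling {
    min_node_count = var.min_nodes_per_pool
    max_node_count = var.max_nodes_per_pool
  }
*/
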
# To view supported locations, run:
# gcloud compute zones list
variable "location" {
  type    = string
  default = null
}

# To view supported versions, run:
# gcloud container get-server-config --region=europe-north1 '--format=flattened(channels)'
# But it's also possible to just specify e.g. "1.20" and let GKE figure out the rest.
variable "k8s_version" {
  type    = string
  default = "1.21"
}

locals {
  location          = var.location != null ? var.location : "europe-north1-a"
  registry_location = "EU"
  region            = replace(local.location, "/-[a-z]$/", "")
  # Unfortunately, the following line doesn't work
  # (that attribute just returns an empty string),
  # so we have to either hard-code the project name
  # or use an environment variable (e.g. GOOGLE_PROJECT).
  #project = data.google_client_config._.project
  #project = "prepare-tf"
}
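
# If you prefer to set the project in code rather than through the
# GOOGLE_PROJECT environment variable, one option is a provider block.
# This is a minimal, commented-out sketch; the project ID below is just
# the placeholder from the commented-out local above.
/*
provider "google" {
  project = "prepare-tf"
  region  = local.region
}
*/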