# Lab terraform config
module "gkee-plattform-lab-onprem" {
source = "terraform-registry.nrk.cloud/nrkno/iac-terraform-gcloud-gkee-kubernetes/generic"
version = "1.2.6"
google_dns_zone_name = "ma.k8s-lab.gcp.nrk.cloud"
google_dns_name = "plattform-ip"
google_dns_name_internal = "plattform-ip"
#! felles: 10.27.1.15. lab: 10.252.241.145. Lab requires Fortinet VPN to access
gkee_admin_workstation_ip = "10.252.241.145"
gkee_admin_workstation_ssh_key = data.vault_generic_secret.gkee_admin_workstation_ssh_lab.data.tls_private_key_pem
gkee_cluster_admin_group = "7bbf7a6f-b277-495f-b5ca-80416d2964ba"
#! felles: gke-admin-mtp7g. lab: gke-admin-cm676.
cluster_admin_cluster_membership = "projects/nrk-platt-gke-enterprise-test/locations/global/memberships/gke-admin-cm676"
cluster_on_prem_version = "1.28.400-gke.75"
cluster_name = "gkee-plattform-lab"
cluster_location = "europe-west3"
cluster_google_project = "nrk-platt-gke-enterprise-test"
cluster_anti_affinity_groups = false
cluster_storage_vsphere_csi_disabled = false
cluster_vm_tracking_enabled = false
cluster_auto_repair_config = true
cluster_enable_control_plane_v2 = true
cluster_upgrade_policy_control_plane_only = false
cluster_annotations = null
cluster_control_plane_node = {
cpus = 4
memory = 8196
replicas = 3
auto_resize_config = true
}
cluster_network_config = {
service_address_cidr_blocks = ["192.168.20.0/22"]
pod_address_cidr_blocks = ["10.124.0.0/16"]
dhcp_ip_config = true
vcenter_network = "IP-LAB/gke-usercluster1-lab"
host_config = {
dns_servers = ["10.252.243.2", "10.252.243.3"]
ntp_servers = ["ntp.uio.no"]
dns_search_domains = []
}
control_plane_v2_config = {
netmask = "255.255.255.128"
gateway = "10.252.242.1"
ips = [
{
ip = "10.252.242.5"
hostname = "gkee-plattform-lab-control-plane-01"
},
{
ip = "10.252.242.6"
hostname = "gkee-plattform-lab-control-plane-02"
},
{
ip = "10.252.242.7"
hostname = "gkee-plattform-lab-control-plane-03"
}
]
}
}
cluster_load_balancer = {
vip_config = {
control_plane_vip = "10.252.242.97"
# ingress_vip = "10.252.242.98"
ingress_vip = ""
}
metal_lb_config = [
{
pool = "gke-usercluster1-lab-lb-pool"
addresses = ["10.252.242.98-10.252.242.126"]
avoid_buggy_ips = true
manual_assign = false
}
]
}
cluster_dataplane_v2 = {
dataplane_v2_enabled = true
windows_dataplane_v2_enabled = false
advanced_networking = true
}
cluster_vcenter = {
resource_pool = "IP-LAB/Resources"
datastore = "IPLAB_NFSDATASTORE"
datacenter = "Marienlyst"
cluster = "IP-LAB"
folder = "/Marienlyst/vm/IP-LAB"
address = "malabvc01.vmware.nrk.cloud"
storage_policy_name = ""
#! ca_cert_data needs to be base64 encoded
#* Leaving it empty will use same CA used for provisioning workstation
ca_cert_data = ""
}
cluster_nodepools = [
{
name = "gkee-plattform-lab-nodepool"
config = {
cpus = 8
memory_mb = 32768
replicas = 3
image_type = "ubuntu_containerd"
boot_disk_size_gb = 100
enable_load_balancer = true
}
node_pool_autoscaling = {
min_replicas = 3
max_replicas = 10
}
}
]
akamai_dns_api_user = {
api_host = data.vault_generic_secret.akamai_credentials_certmanager.data.host
access_token = data.vault_generic_secret.akamai_credentials_certmanager.data.access_token
client_secret = data.vault_generic_secret.akamai_credentials_certmanager.data.client_secret
client_token = data.vault_generic_secret.akamai_credentials_certmanager.data.client_token
}
cloud_provider = "gke"
creator = "Plattform-utvikleropplevelse"
disable_access_log = true
enable_cert_manager = false
enable_cert_manager_clusterissuer = true
enable_filebeat = false
enable_gatekeeper = false
enable_gorgon = false
enable_grafana_mimir = true
enable_icinga = false
enable_ingress_nginx_helm = true
enable_local_loki = false
enable_opentelemetry = true
enable_prometheus_adapter = false
enable_prometheus_operator = false
enable_prometheus_stack = true
enable_promtail = true
enable_scalingschedule_controller = false
enable_smb_csi = true
enable_vault_auth = true
enable_victoriametrics = false
escloud_plattform_password = data.lastpass_secret.escloud_plattform.password
escloud_plattform_server_url = null
escloud_plattform_username = data.lastpass_secret.escloud_plattform.username
external_dns_zone_name = null
gatekeeper_constraint_enforcement_action = null
gatekeeper_controller_manager_resources = null
gatekeeper_exempt_namespaces = null
gatekeeper_priority_class_name = null
gatekeeper_sync_profile = null
icinga_k8s_ssh_image = null
icinga_k8s_ssh_key = null
icinga_k8s_ssh_user = null
ingress_admission_webhook_priority_class_name = null
ingress_autoscaler = null
ingress_criticality = null
ingress_default_backend_priority_class_name = null
ingress_default_ssl_cert = null
ingress_environment = null
ingress_external = false
ingress_extra_args = null
ingress_extra_config = null
ingress_helm_extra_args = null
ingress_internal = true
ingress_limits = null
ingress_node_pool_enable = true
ingress_nrk_monitoring = "true"
ingress_requests = null
ingress_source_ip_list = null
ingress_source_ip_list_internal = null
ingress_use_x_forwarded_for = true
internal_dns_zone_name = "plattform-test"
k8s_cert-view_ns = null
kube_cluster_ca_certificate = null
kube_cluster_owner_team = "plattform"
kube_outbound_ip = null
kube_token = module.IP-LAB_GKE_ServiceAccount-component-access.private_key
kubernetes_priority_classes = null
kured = true
kured_args = null
letsencrypt_cloud_dns_provider_yaml = null
letsencrypt_email = "linuxadmin@nrk.no"
loki_address = "https://loki-ingest-nonprod.nrk.cloud/loki/api/v1/push"
loki_password = data.vault_generic_secret.loki_nonprod_credentials.data.password
loki_username = data.vault_generic_secret.loki_nonprod_credentials.data.username
mimir_address = "https://mimir-ingest-nonprod.nrk.cloud/api/v1/push"
mimir_password = data.vault_generic_secret.mimir_nonprod_credentials.data.password
mimir_username = data.vault_generic_secret.mimir_nonprod_credentials.data.username
nrk_sso = true #* We will enable SSO on all ingress`s automaticly
nrk_sso_proxy = true
prometheus_adapter_custom_rules_yaml = null
promtail_enable_syslog = true
promtail_enable_syslog_remote = true
promtail_tolerations = null
revision_helm_vault = null
slack_contact = "plattform"
slack_notification = "plattform-utvikleropplevelse-alerts-prod"
tag_nrk_component = "Kubernetes on VmWare"
tag_nrk_environment = "non-production"
tag_nrk_product_id = "none"
tag_nrk_product = "kubernetes-gkee-plattform-int-felles-provider-test"
tag_nrk_support-group = "plattform-utvikleropplevelse"
tempo_address = "https://tempo-ingest-nonprod.nrk.cloud/otlp"
tempo_password = data.vault_generic_secret.tempo_nonprod_credentials.data.password
tempo_username = data.vault_generic_secret.tempo_nonprod_credentials.data.username
vault_secret_write = true
victoriametrics_client_auth_password = data.lastpass_secret.victoriametrics_client_auth.password
victoriametrics_client_auth_username = data.lastpass_secret.victoriametrics_client_auth.username
providers = {
azuread = azuread
azurerm = azurerm.drift
grafana = grafana
google = google.gkee-plattform-int-iplab-onprem
vault = vault.plattform-k8s
lastpass = lastpass
random = random
}
}
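
# The module call above references data sources defined elsewhere in this configuration
# (Vault secrets for the workstation SSH key and the Akamai/Loki/Mimir/Tempo credentials,
# plus LastPass entries). A minimal sketch of their presumed shape; the path and ID below
# are hypothetical placeholders, not the real values:
#
#   data "vault_generic_secret" "gkee_admin_workstation_ssh_lab" {
#     path = "secret/plattform/gkee/lab/admin-workstation-ssh" # hypothetical path
#   }
#
#   data "lastpass_secret" "escloud_plattform" {
#     id = "1234567890" # hypothetical LastPass entry ID
#   }
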
data "azuread_group" "admin-group" {
object_id = var.gkee_cluster_admin_group
security_enabled = true
}
data "azuread_user" "admin-users" {
for_each = toset(data.azuread_group.admin-group.members)
object_id = each.key
}
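
# `members` on the group data source returns object IDs, so the for_each above fans out
# one azuread_user lookup per member; each user's user_principal_name is then fed into
# the cluster's authorization block further down.
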
locals {
  external_dns_zone_name = var.ingress_external ? "${var.google_dns_name}.${var.google_dns_zone_name}" : ""
  internal_dns_zone_name = var.ingress_internal ? (var.google_dns_name_internal == null ? "${var.google_dns_name}.${var.google_dns_zone_name}" : "${var.google_dns_name_internal}.${var.google_dns_zone_name}") : ""
  # global-auth-url = "http://${module.nrk-sso.0.sso_service_ip}:4180/oauth2/auth"
  ingress_extra_config = (var.nrk_sso && var.nrk_sso_proxy) ? {
    global-auth-url = ""
    global-auth-signin = "https://nrk-sso.${local.external_dns_zone_name}/oauth2/start?rd=$scheme://$host$escaped_request_uri"
    no-auth-locations = "/.well-known/acme-challenge"
    location-snippet = <<-EOD
      auth_request_set $user $upstream_http_x_auth_request_preferred_username;
      auth_request_set $email $upstream_http_x_auth_request_email;
      auth_request_set $groups $upstream_http_x_auth_request_groups;
      auth_request_set $access_token $upstream_http_x_auth_request_access_token;
      proxy_set_header X-Forwarded-User $user;
      proxy_set_header X-Forwarded-Email $email;
      proxy_set_header X-Forwarded-Groups $groups;
      proxy_set_header X-Forwarded-Access-Token $access_token;
    EOD
  } : {}
  promtail_tolerations = var.ingress_node_pool_enable ? [
    {
      key = "dedicated"
      operator = "Equal"
      effect = "NoSchedule"
      value = "ingress-nginx"
    }
  ] : []
  ingress_nodepool = [{
    config = {
      cpus = 4
      memory_mb = 8196 # NB: likely a typo for 8192 (8 GiB)
      replicas = var.ingress_autoscaler == null ? 3 : var.ingress_autoscaler.min_replicas
      image_type = "cos"
      image = null
      boot_disk_size_gb = 100
      taints = [{
        key = "dedicated"
        value = "ingress-nginx"
        effect = "NO_SCHEDULE"
        # operator = "Equal"
      }]
      labels = {
        agentpool = "ingress"
        nrk-node-pool = "ingress-nginx"
      }
      vsphere_config = {
        datastore = var.cluster_vcenter.datastore
        tags = null
        host_groups = []
      }
      enable_load_balancer = true
    }
    name = "${var.cluster_name}-ingress-nginx"
    location = var.cluster_location
    display_name = null
    annotations = {}
    vmware_cluster = google_gkeonprem_vmware_cluster.gkee_onprem_cluster.name
    node_pool_autoscaling = {
      min_replicas = var.ingress_autoscaler == null ? 3 : var.ingress_autoscaler.min_replicas
      max_replicas = var.ingress_autoscaler == null ? 10 : var.ingress_autoscaler.max_replicas
    }
    project = var.cluster_google_project
  }]
  cluster_nodepools = concat(flatten(var.cluster_nodepools), local.ingress_nodepool)
}
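
# With the lab inputs above, local.cluster_nodepools ends up with two entries, which the
# node-pool resource below keys by name, roughly:
#
#   {
#     "gkee-plattform-lab-nodepool"      = { ... 8 CPUs, 32 GiB, autoscaling 3-10 ... }
#     "gkee-plattform-lab-ingress-nginx" = { ... dedicated, tainted ingress pool ... }
#   }
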
resource "google_dns_record_set" "gke-dns-record-external" {
count = var.ingress_external ? 1 : 0
name = "*.${local.external_dns_zone_name}."
managed_zone = replace(var.google_dns_zone_name, ".", "-")
type = "A"
ttl = 3600
rrdatas = [module.common.ingress_ip_external]
}
resource "google_dns_record_set" "gke-dns-record-internal" {
count = var.ingress_internal ? 1 : 0
name = "*.${local.internal_dns_zone_name}."
managed_zone = replace(var.google_dns_zone_name, ".", "-")
type = "A"
ttl = 3600
rrdatas = [module.common.ingress_ip_internal]
}
resource "google_gkeonprem_vmware_cluster" "gkee_onprem_cluster" {
lifecycle {
ignore_changes = [
state,
status,
effective_annotations,
vcenter,
anti_affinity_groups,
storage,
upgrade_policy
]
}
description = "Onprem cluster deployed in VmWare by Terraform"
admin_cluster_membership = var.cluster_admin_cluster_membership
on_prem_version = var.cluster_on_prem_version
name = var.cluster_name
location = var.cluster_location
annotations = var.cluster_annotations
vm_tracking_enabled = var.cluster_vm_tracking_enabled
enable_control_plane_v2 = var.cluster_enable_control_plane_v2
disable_bundled_ingress = var.cluster_disable_bundled_ingress
control_plane_node {
cpus = tonumber(var.cluster_control_plane_node.cpus)
memory = tonumber(var.cluster_control_plane_node.memory)
replicas = tonumber(var.cluster_control_plane_node.replicas)
auto_resize_config {
enabled = tobool(var.cluster_control_plane_node.auto_resize_config)
}
}
anti_affinity_groups {
aag_config_disabled = var.cluster_anti_affinity_groups
}
storage {
vsphere_csi_disabled = var.cluster_storage_vsphere_csi_disabled
}
network_config {
service_address_cidr_blocks = var.cluster_network_config.service_address_cidr_blocks
pod_address_cidr_blocks = var.cluster_network_config.pod_address_cidr_blocks
vcenter_network = var.cluster_network_config.vcenter_network
dhcp_ip_config {
enabled = var.cluster_network_config.dhcp_ip_config
}
dynamic "static_ip_config" {
for_each = var.cluster_network_config.static_ip_config_ip_blocks != null ? [{ "static_ip_config_ip_blocks" : var.cluster_network_config.static_ip_config_ip_blocks }] : []
content {
ip_blocks {
gateway = var.cluster_network_config.static_ip_config_ip_blocks.gateway
netmask = var.cluster_network_config.static_ip_config_ip_blocks.netmask
dynamic "ips" {
for_each = var.cluster_network_config.static_ip_config_ip_blocks.ips
content {
ip = ips.value.ip
hostname = ips.value.hostname
}
}
}
}
}
host_config {
dns_servers = var.cluster_network_config.host_config.dns_servers
ntp_servers = var.cluster_network_config.host_config.ntp_servers
dns_search_domains = var.cluster_network_config.host_config.dns_search_domains
}
dynamic "control_plane_v2_config" {
for_each = var.cluster_network_config.control_plane_v2_config != null ? [{ "control_plane_v2_config" : var.cluster_network_config.control_plane_v2_config }] : []
content {
control_plane_ip_block {
gateway = var.cluster_network_config.control_plane_v2_config.gateway
netmask = var.cluster_network_config.control_plane_v2_config.netmask
dynamic "ips" {
for_each = var.cluster_network_config.control_plane_v2_config.ips
content {
ip = ips.value.ip
hostname = ips.value.hostname
}
}
}
}
}
}
load_balancer {
dynamic "vip_config" {
for_each = var.cluster_load_balancer.vip_config != null ? [{ "vip_config" : var.cluster_load_balancer.vip_config }] : []
content {
control_plane_vip = var.cluster_load_balancer.vip_config.control_plane_vip
ingress_vip = var.cluster_load_balancer.vip_config.ingress_vip
}
}
dynamic "f5_config" {
for_each = var.cluster_load_balancer.f5_config != null ? [{ "f5_config" : var.cluster_load_balancer.f5_config }] : []
content {
address = var.cluster_load_balancer.f5_config.address
partition = var.cluster_load_balancer.f5_config.partition
snat_pool = var.cluster_load_balancer.f5_config.snat_pool
}
}
dynamic "manual_lb_config" {
for_each = var.cluster_load_balancer.manual_lb_config != null ? [{ "manual_lb_config" : var.cluster_load_balancer.manual_lb_config }] : []
content {
ingress_http_node_port = var.cluster_load_balancer.manual_lb_config.ingress_http_node_port
ingress_https_node_port = var.cluster_load_balancer.manual_lb_config.ingress_https_node_port
control_plane_node_port = var.cluster_load_balancer.manual_lb_config.control_plane_node_port
konnectivity_server_node_port = var.cluster_load_balancer.manual_lb_config.konnectivity_server_node_port
}
}
dynamic "metal_lb_config" {
for_each = var.cluster_load_balancer.metal_lb_config != null ? [{ "metal_lb_config" : var.cluster_load_balancer.metal_lb_config }] : []
content {
dynamic "address_pools" {
for_each = var.cluster_load_balancer.metal_lb_config
content {
pool = address_pools.value.pool
addresses = address_pools.value.addresses
avoid_buggy_ips = address_pools.value.avoid_buggy_ips
manual_assign = address_pools.value.manual_assign
}
}
}
}
}
dataplane_v2 {
dataplane_v2_enabled = var.cluster_dataplane_v2.dataplane_v2_enabled
windows_dataplane_v2_enabled = var.cluster_dataplane_v2.windows_dataplane_v2_enabled
advanced_networking = var.cluster_dataplane_v2.advanced_networking
}
auto_repair_config {
enabled = var.cluster_auto_repair_config
}
authorization {
dynamic "admin_users" {
for_each = toset(data.azuread_group.admin-group.members)
content {
username = data.azuread_user.admin-users[admin_users.key].user_principal_name
}
}
}
upgrade_policy {
control_plane_only = var.cluster_upgrade_policy_control_plane_only
}
vcenter {
resource_pool = var.cluster_vcenter.resource_pool
datastore = var.cluster_vcenter.datastore
datacenter = var.cluster_vcenter.datacenter
cluster = var.cluster_vcenter.cluster
folder = var.cluster_vcenter.folder
ca_cert_data = base64decode(var.cluster_vcenter.ca_cert_data)
storage_policy_name = var.cluster_vcenter.storage_policy_name
}
timeouts {
create = "120m"
update = "120m"
delete = "120m"
}
}
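
# An existing cluster can presumably be brought under management with terraform import;
# a sketch using the resource's usual ID format (verify the exact format against the
# google provider docs for your provider version):
#
#   terraform import 'google_gkeonprem_vmware_cluster.gkee_onprem_cluster' \
#     projects/nrk-platt-gke-enterprise-test/locations/europe-west3/vmwareClusters/gkee-plattform-lab
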
resource "google_gkeonprem_vmware_node_pool" "gkee_onprem_cluster" {
for_each = { for k, v in local.cluster_nodepools : "${v.name}" => v }
lifecycle {
ignore_changes = [
config.0.image,
config.0.vsphere_config,
status,
effective_annotations,
state,
uid,
update_time
]
}
name = each.value.name == null ? "${var.cluster_name}-np" : each.value.name
vmware_cluster = each.value.vmware_cluster == null ? google_gkeonprem_vmware_cluster.gkee_onprem_cluster.name : each.value.vmware_cluster
location = var.cluster_location
display_name = each.value.display_name != null ? each.value.display_name : null
annotations = each.value.annotations
project = var.cluster_google_project
dynamic "config" {
for_each = each.value.config == null ? {} : { "config" = each.value.config }
content {
cpus = config.value.cpus
memory_mb = config.value.memory_mb
replicas = config.value.replicas
image_type = config.value.image_type
image = config.value.image
boot_disk_size_gb = config.value.boot_disk_size_gb
labels = config.value.labels
enable_load_balancer = config.value.enable_load_balancer
dynamic "taints" {
for_each = config.value.taints
content {
key = taints.value.key
value = taints.value.value
effect = taints.value.effect
# operator = taint.value.operator
}
}
vsphere_config {
datastore = config.value.vsphere_config != null ? config.value.vsphere_config.datastore != null ? config.value.vsphere_config.datastore : var.cluster_vcenter.datastore : null
host_groups = config.value.vsphere_config != null ? config.value.vsphere_config.host_groups : null
dynamic "tags" {
for_each = config.value.vsphere_config != null ? config.value.vsphere_config.tags != null ? { "tags" = config.value.vsphere_config.tags } : {} : {}
content {
category = tags.value.category
tag = tags.value.tag
}
}
}
}
}
dynamic "node_pool_autoscaling" {
for_each = each.value.node_pool_autoscaling == null ? {} : { "node_pool_autoscaling" = each.value.node_pool_autoscaling }
content {
min_replicas = node_pool_autoscaling.value.min_replicas
max_replicas = node_pool_autoscaling.value.max_replicas
}
}
timeouts {
create = "120m"
update = "120m"
delete = "120m"
}
}
resource "terraform_data" "kubeconfig" {
depends_on = [google_gkeonprem_vmware_cluster.gkee_onprem_cluster]
provisioner "remote-exec" {
connection {
host = var.gkee_admin_workstation_ip
user = "ubuntu"
private_key = var.gkee_admin_workstation_ssh_key
timeout = "5000"
}
inline = [
"KUBECONFIG=/home/ubuntu/kubeconfig kubectl get secret admin -n ${google_gkeonprem_vmware_cluster.gkee_onprem_cluster.name} -o go-template='{{ index .data \"admin.conf\" }}' | base64 -d > /home/ubuntu/${google_gkeonprem_vmware_cluster.gkee_onprem_cluster.name}-kubeconfig",
]
}
}
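
# The provisioner runs on the admin workstation: using the admin cluster's kubeconfig
# (/home/ubuntu/kubeconfig), it reads the "admin" secret in the namespace named after
# the user cluster, extracts the admin.conf key with a go-template, and base64-decodes
# it into a per-cluster kubeconfig file that the data source below then reads back.
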
data "remote_file" "kubeconfig" {
conn {
host = var.gkee_admin_workstation_ip
user = "ubuntu"
private_key = var.gkee_admin_workstation_ssh_key
sudo = false
timeout = "5000"
}
path = "/home/ubuntu/${var.cluster_name}-kubeconfig"
depends_on = [terraform_data.kubeconfig]
}
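
# remote_file is not a built-in data source; it presumably comes from a community
# provider that reads files over SSH (e.g. tenstad/remote, where conn.timeout is
# given in milliseconds, so "5000" would mean five seconds).
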
provider "kubernetes" {
alias = "gke"
host = yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.server
cluster_ca_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.certificate-authority-data)
client_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-certificate-data)
client_key = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-key-data)
}
provider "helm" {
alias = "gke"
kubernetes {
host = yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.server
cluster_ca_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.certificate-authority-data)
client_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-certificate-data)
client_key = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-key-data)
}
}
module "nrk-sso" {
count = var.nrk_sso_proxy ? 1 : 0
source = "terraform-registry.nrk.cloud/nrkno/iac-terraform-kubernetes-nrk-sso/generic"
version = "1.1.5"
cluster_external_dns_name = var.ingress_internal ? local.internal_dns_zone_name : local.external_dns_zone_name
storage_class = "standard-rwo"
slack_contact = var.slack_contact
slack_notification = var.slack_notification
tag_nrk_component = var.tag_nrk_component
tag_nrk_environment = var.tag_nrk_environment
tag_nrk_product_id = var.tag_nrk_product_id
tag_nrk_product = var.tag_nrk_product
tag_nrk_support-group = var.tag_nrk_support-group
providers = {
azuread = azuread
azurerm = azurerm
kubernetes = kubernetes.gke
lastpass = lastpass
random = random
vault = vault
}
}
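
# Because this module is gated on count, references to its outputs need an index,
# e.g. module.nrk-sso[0].sso_service_ip, as in the commented-out global-auth-url
# in the locals block above.
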
module "common" {
source = "terraform-registry.nrk.cloud/nrkno/iac-terraform-kubernetes-common/generic"
version = "13.0.0"
depends_on = [google_gkeonprem_vmware_cluster.gkee_onprem_cluster, google_gkeonprem_vmware_node_pool.gkee_onprem_cluster]
cloud_provider = "google"
creator = var.creator
kube_cluster_owner_team = var.kube_cluster_owner_team
kube_cluster_name = var.cluster_name
# kube_config_raw = data.remote_file.kubeconfig.content
kube_endpoint = yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.server
kube_cluster_ca_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).clusters[0].cluster.certificate-authority-data)
kube_client_certificate = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-certificate-data)
kube_client_key = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.client-key-data)
kube_password = var.kube_password
kube_username = var.kube_username
# kube_token = base64decode(yamldecode(data.remote_file.kubeconfig.content).users[0].user.token)
kube_outbound_ip = var.kube_outbound_ip
external_dns_zone_name = local.external_dns_zone_name
internal_dns_zone_name = local.internal_dns_zone_name
location = var.cluster_location
slack_contact = var.slack_contact
slack_notification = var.slack_notification
k8s_cert-view_ns = var.k8s_cert-view_ns
enable_ingress_nginx_helm = var.enable_ingress_nginx_helm
ingress_default_ssl_cert = var.ingress_default_ssl_cert
ingress_environment = var.ingress_environment
ingress_source_ip_list = var.ingress_source_ip_list
ingress_source_ip_list_internal = var.ingress_source_ip_list_internal
ingress_external = var.ingress_external
ingress_internal = var.ingress_internal
ingress_autoscaler = var.ingress_autoscaler
ingress_use_x_forwarded_for = var.ingress_use_x_forwarded_for
ingress_extra_args = var.ingress_extra_args
ingress_helm_extra_args = var.ingress_helm_extra_args
ingress_extra_config = merge(var.ingress_extra_config, local.ingress_extra_config)
ingress_nrk_monitoring = var.ingress_nrk_monitoring
ingress_requests = var.ingress_requests
ingress_limits = var.ingress_limits
ingress_node_pool_enable = var.ingress_node_pool_enable
ingress_admission_webhook_priority_class_name = var.ingress_admission_webhook_priority_class_name
ingress_controller_priority_class_name = var.ingress_controller_priority_class_name
ingress_criticality = var.ingress_criticality
ingress_default_backend_priority_class_name = var.ingress_default_backend_priority_class_name
disable_access_log = var.disable_access_log
enable_icinga = var.enable_icinga
icinga_k8s_ssh_image = var.icinga_k8s_ssh_image
icinga_k8s_ssh_key = var.icinga_k8s_ssh_key
icinga_k8s_ssh_user = var.icinga_k8s_ssh_user
enable_cert_manager = var.enable_cert_manager
enable_cert_manager_clusterissuer = var.enable_cert_manager_clusterissuer
letsencrypt_cloud_dns_provider = "gcloud"
letsencrypt_cloud_dns_provider_yaml = local.letsencrypt_cloud_dns_provider
letsencrypt_email = var.letsencrypt_email
akamai_dns_api_user = var.akamai_dns_api_user
# victoriametrics_client_auth_username = var.victoriametrics_client_auth_username
# victoriametrics_client_auth_password = var.victoriametrics_client_auth_password
enable_victoriametrics = var.enable_victoriametrics
# escloud_plattform_server_url = var.escloud_plattform_server_url
# escloud_plattform_password = var.escloud_plattform_password
# escloud_plattform_username = var.escloud_plattform_username
enable_gorgon = var.enable_gorgon
enable_opentelemetry = var.enable_opentelemetry
enable_smb_csi = var.enable_smb_csi
enable_scalingschedule_controller = var.enable_scalingschedule_controller
enable_filebeat = var.enable_filebeat
enable_vault_auth = var.enable_vault_auth
vault_server_url = var.vault_server_url
vault_secret_write = var.vault_secret_write
kubernetes_priority_classes = var.kubernetes_priority_classes
kured = var.kured
kured_args = var.kured_args
enable_prometheus_stack = var.enable_prometheus_stack
enable_prometheus_operator = var.enable_prometheus_operator
enable_prometheus_adapter = var.enable_prometheus_adapter
prometheus_adapter_custom_rules_yaml = var.prometheus_adapter_custom_rules_yaml
promtail_enable_syslog = var.promtail_enable_syslog
promtail_enable_syslog_remote = var.promtail_enable_syslog_remote
enable_promtail = var.enable_promtail
promtail_tolerations = local.promtail_tolerations
revision_helm_vault = var.revision_helm_vault
loki_username = var.loki_username
loki_password = var.loki_password
loki_address = var.loki_address
enable_local_loki = var.enable_local_loki
tempo_username = var.tempo_username
tempo_password = var.tempo_password
tempo_address = var.tempo_address
mimir_username = var.mimir_username
mimir_password = var.mimir_password
mimir_address = var.mimir_address
enable_grafana_mimir = var.enable_grafana_mimir
helm_github_auth = var.helm_github_auth
enable_gatekeeper = var.enable_gatekeeper
gatekeeper_constraint_enforcement_action = var.gatekeeper_constraint_enforcement_action
gatekeeper_priority_class_name = var.gatekeeper_priority_class_name
gatekeeper_controller_manager_resources = var.gatekeeper_controller_manager_resources
gatekeeper_exempt_namespaces = var.gatekeeper_exempt_namespaces
gatekeeper_sync_profile = var.gatekeeper_sync_profile
providers = {
azuread = azuread
azurerm = azurerm
grafana = grafana
kubernetes = kubernetes.gke
helm = helm.gke
vault = vault
}
}
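
# This file assumes provider requirements declared elsewhere in the workspace. A hedged
# sketch of a matching required_providers block; the source addresses are the usual ones,
# version constraints are omitted, and the lastpass/remote sources are assumptions:
#
#   terraform {
#     required_providers {
#       google     = { source = "hashicorp/google" }
#       azuread    = { source = "hashicorp/azuread" }
#       azurerm    = { source = "hashicorp/azurerm" }
#       kubernetes = { source = "hashicorp/kubernetes" }
#       helm       = { source = "hashicorp/helm" }
#       vault      = { source = "hashicorp/vault" }
#       random     = { source = "hashicorp/random" }
#       grafana    = { source = "grafana/grafana" }
#       remote     = { source = "tenstad/remote" }   # assumption: source of data.remote_file
#       lastpass   = { source = "nrkno/lastpass" }   # assumption: NRK's lastpass provider
#     }
#   }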