Skip to content

Instantly share code, notes, and snippets.

@walf443
Created August 14, 2023 06:59
Show Gist options
  • Save walf443/c55eae54e453458297c0585befb6a030 to your computer and use it in GitHub Desktop.
k0s debian_12 ostest log
TF_VAR_os=debian_12 terraform apply
module.k0sctl.data.http.k0s_version[0]: Reading...
module.k0sctl.data.http.k0s_version[0]: Read complete after 0s [id=https://docs.k0sproject.io/stable.txt]
module.infra.data.aws_availability_zones.available: Reading...
module.infra.data.aws_vpc.default: Reading...
module.os.data.aws_ami.debian_12[0]: Reading...
module.infra.data.aws_availability_zones.available: Read complete after 0s [id=ap-northeast-1]
module.os.data.aws_ami.debian_12[0]: Read complete after 0s [id=ami-0bef69e9bf0a285ef]
module.infra.data.aws_ec2_instance_type_offerings.in_available_azs["t3a.small"]: Reading...
module.infra.data.aws_ec2_instance_type_offerings.in_available_azs["t3a.small"]: Read complete after 1s [id=ap-northeast-1]
module.infra.data.aws_vpc.default: Read complete after 1s [id=vpc-62fc1504]
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create
<= read (data resources)
Terraform will perform the following actions:
# local_sensitive_file.ssh_private_key will be created
+ resource "local_sensitive_file" "ssh_private_key" {
+ content = (sensitive value)
+ content_base64sha256 = (known after apply)
+ content_base64sha512 = (known after apply)
+ content_md5 = (known after apply)
+ content_sha1 = (known after apply)
+ content_sha256 = (known after apply)
+ content_sha512 = (known after apply)
+ directory_permission = "0700"
+ file_permission = "0400"
+ filename = (known after apply)
+ id = (known after apply)
}
# random_pet.resource_name_prefix[0] will be created
+ resource "random_pet" "resource_name_prefix" {
+ id = (known after apply)
+ length = 2
+ separator = "-"
}
# module.infra.data.aws_subnet.default_for_selected_az will be read during apply
# (config refers to values not yet known)
<= data "aws_subnet" "default_for_selected_az" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = (known after apply)
+ availability_zone = (known after apply)
+ availability_zone_id = (known after apply)
+ available_ip_address_count = (known after apply)
+ cidr_block = (known after apply)
+ customer_owned_ipv4_pool = (known after apply)
+ default_for_az = true
+ enable_dns64 = (known after apply)
+ enable_lni_at_device_index = (known after apply)
+ enable_resource_name_dns_a_record_on_launch = (known after apply)
+ enable_resource_name_dns_aaaa_record_on_launch = (known after apply)
+ id = (known after apply)
+ ipv6_cidr_block = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = (known after apply)
+ map_customer_owned_ip_on_launch = (known after apply)
+ map_public_ip_on_launch = (known after apply)
+ outpost_arn = (known after apply)
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ state = (known after apply)
+ tags = (known after apply)
+ vpc_id = "vpc-62fc1504"
}
# module.infra.aws_instance.nodes["controller-1"] will be created
+ resource "aws_instance" "nodes" {
+ ami = "ami-0bef69e9bf0a285ef"
+ arn = (known after apply)
+ associate_public_ip_address = true
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = (known after apply)
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = true
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = (known after apply)
+ tags_all = (known after apply)
+ tenancy = (known after apply)
+ user_data = "892da2364cef3686d103b2806e5e285e998bb9ae"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
+ root_block_device {
+ delete_on_termination = true
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = "gp2"
}
}
# module.infra.aws_instance.nodes["controller-2"] will be created
+ resource "aws_instance" "nodes" {
+ ami = "ami-0bef69e9bf0a285ef"
+ arn = (known after apply)
+ associate_public_ip_address = true
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = (known after apply)
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = true
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = (known after apply)
+ tags_all = (known after apply)
+ tenancy = (known after apply)
+ user_data = "892da2364cef3686d103b2806e5e285e998bb9ae"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
+ root_block_device {
+ delete_on_termination = true
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = "gp2"
}
}
# module.infra.aws_instance.nodes["controller-3"] will be created
+ resource "aws_instance" "nodes" {
+ ami = "ami-0bef69e9bf0a285ef"
+ arn = (known after apply)
+ associate_public_ip_address = true
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = (known after apply)
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = true
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = (known after apply)
+ tags_all = (known after apply)
+ tenancy = (known after apply)
+ user_data = "892da2364cef3686d103b2806e5e285e998bb9ae"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
+ root_block_device {
+ delete_on_termination = true
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = "gp2"
}
}
# module.infra.aws_instance.nodes["worker-1"] will be created
+ resource "aws_instance" "nodes" {
+ ami = "ami-0bef69e9bf0a285ef"
+ arn = (known after apply)
+ associate_public_ip_address = true
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = (known after apply)
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = false
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = (known after apply)
+ tags_all = (known after apply)
+ tenancy = (known after apply)
+ user_data = "892da2364cef3686d103b2806e5e285e998bb9ae"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
+ root_block_device {
+ delete_on_termination = true
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = "gp2"
}
}
# module.infra.aws_instance.nodes["worker-2"] will be created
+ resource "aws_instance" "nodes" {
+ ami = "ami-0bef69e9bf0a285ef"
+ arn = (known after apply)
+ associate_public_ip_address = true
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = (known after apply)
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = false
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = (known after apply)
+ tags_all = (known after apply)
+ tenancy = (known after apply)
+ user_data = "892da2364cef3686d103b2806e5e285e998bb9ae"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
+ root_block_device {
+ delete_on_termination = true
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = "gp2"
}
}
# module.infra.aws_key_pair.ssh will be created
+ resource "aws_key_pair" "ssh" {
+ arn = (known after apply)
+ fingerprint = (known after apply)
+ id = (known after apply)
+ key_name = (known after apply)
+ key_name_prefix = (known after apply)
+ key_pair_id = (known after apply)
+ key_type = (known after apply)
+ public_key = (known after apply)
+ tags_all = (known after apply)
}
# module.infra.aws_route_table_association.default_for_selected_az will be created
+ resource "aws_route_table_association" "default_for_selected_az" {
+ id = (known after apply)
+ route_table_id = "rtb-a693d5c0"
+ subnet_id = (known after apply)
}
# module.infra.aws_security_group.controller will be created
+ resource "aws_security_group" "controller" {
+ arn = (known after apply)
+ description = "Allow API server access from the internet."
+ egress = (known after apply)
+ id = (known after apply)
+ ingress = (known after apply)
+ name = (known after apply)
+ name_prefix = (known after apply)
+ owner_id = (known after apply)
+ revoke_rules_on_delete = false
+ tags = (known after apply)
+ tags_all = (known after apply)
+ vpc_id = "vpc-62fc1504"
}
# module.infra.aws_security_group.node will be created
+ resource "aws_security_group" "node" {
+ arn = (known after apply)
+ description = "Allow ALL ingress traffic inside the subnet, ALL egress traffic to the outside and SSH from the internet."
+ egress = (known after apply)
+ id = (known after apply)
+ ingress = (known after apply)
+ name = (known after apply)
+ name_prefix = (known after apply)
+ owner_id = (known after apply)
+ revoke_rules_on_delete = false
+ tags = (known after apply)
+ tags_all = (known after apply)
+ vpc_id = "vpc-62fc1504"
}
# module.infra.aws_security_group_rule.node_additional_ingress[0] will be created
+ resource "aws_security_group_rule" "node_additional_ingress" {
+ cidr_blocks = [
+ "10.244.0.0/16",
]
+ description = "Allow ingress from additional CIDRs."
+ from_port = 0
+ id = (known after apply)
+ protocol = "-1"
+ security_group_id = (known after apply)
+ security_group_rule_id = (known after apply)
+ self = false
+ source_security_group_id = (known after apply)
+ to_port = 65535
+ type = "ingress"
}
# module.infra.aws_security_group_rule.node_all_egress will be created
+ resource "aws_security_group_rule" "node_all_egress" {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = "Allow ALL egress traffic."
+ from_port = 0
+ id = (known after apply)
+ protocol = "-1"
+ security_group_id = (known after apply)
+ security_group_rule_id = (known after apply)
+ self = false
+ source_security_group_id = (known after apply)
+ to_port = 65535
+ type = "egress"
}
# module.infra.aws_security_group_rule.node_public_ssh will be created
+ resource "aws_security_group_rule" "node_public_ssh" {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = "Allow SSH access from the internet."
+ from_port = 22
+ id = (known after apply)
+ protocol = "tcp"
+ security_group_id = (known after apply)
+ security_group_rule_id = (known after apply)
+ self = false
+ source_security_group_id = (known after apply)
+ to_port = 22
+ type = "ingress"
}
# module.infra.aws_security_group_rule.node_subnet_ingress will be created
+ resource "aws_security_group_rule" "node_subnet_ingress" {
+ cidr_blocks = (known after apply)
+ description = "Allow ALL ingress traffic inside the subnet."
+ from_port = 0
+ id = (known after apply)
+ protocol = "-1"
+ security_group_id = (known after apply)
+ security_group_rule_id = (known after apply)
+ self = false
+ source_security_group_id = (known after apply)
+ to_port = 65535
+ type = "ingress"
}
# module.infra.aws_security_group_rule.public_api_server will be created
+ resource "aws_security_group_rule" "public_api_server" {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = "Allow API server access from the internet."
+ from_port = 6443
+ id = (known after apply)
+ protocol = "tcp"
+ security_group_id = (known after apply)
+ security_group_rule_id = (known after apply)
+ self = false
+ source_security_group_id = (known after apply)
+ to_port = 6443
+ type = "ingress"
}
# module.infra.random_shuffle.selected_az will be created
+ resource "random_shuffle" "selected_az" {
+ id = (known after apply)
+ input = [
+ "ap-northeast-1a",
+ "ap-northeast-1d",
]
+ result = (known after apply)
+ result_count = 1
}
# module.infra.terraform_data.provisioned_nodes will be created
+ resource "terraform_data" "provisioned_nodes" {
+ id = (known after apply)
+ input = [
+ {
+ connection = (known after apply)
+ ipv4 = (known after apply)
+ is_controller = (known after apply)
+ is_worker = (known after apply)
+ name = (known after apply)
+ role = (known after apply)
},
+ {
+ connection = (known after apply)
+ ipv4 = (known after apply)
+ is_controller = (known after apply)
+ is_worker = (known after apply)
+ name = (known after apply)
+ role = (known after apply)
},
+ {
+ connection = (known after apply)
+ ipv4 = (known after apply)
+ is_controller = (known after apply)
+ is_worker = (known after apply)
+ name = (known after apply)
+ role = (known after apply)
},
+ {
+ connection = (known after apply)
+ ipv4 = (known after apply)
+ is_controller = (known after apply)
+ is_worker = (known after apply)
+ name = (known after apply)
+ role = (known after apply)
},
+ {
+ connection = (known after apply)
+ ipv4 = (known after apply)
+ is_controller = (known after apply)
+ is_worker = (known after apply)
+ name = (known after apply)
+ role = (known after apply)
},
]
+ output = (known after apply)
}
# module.infra.tls_private_key.ssh will be created
+ resource "tls_private_key" "ssh" {
+ algorithm = "ED25519"
+ ecdsa_curve = "P224"
+ id = (known after apply)
+ private_key_openssh = (sensitive value)
+ private_key_pem = (sensitive value)
+ private_key_pem_pkcs8 = (sensitive value)
+ public_key_fingerprint_md5 = (known after apply)
+ public_key_fingerprint_sha256 = (known after apply)
+ public_key_openssh = (known after apply)
+ public_key_pem = (known after apply)
+ rsa_bits = 2048
}
# module.k0sctl.data.external.k0s_kubeconfig will be read during apply
# (config refers to values not yet known)
<= data "external" "k0s_kubeconfig" {
+ id = (known after apply)
+ program = [
+ "env",
+ "sh",
+ "-ec",
+ <<-EOT
jq '.k0sctl_config | fromjson' |
{ env -u SSH_AUTH_SOCK SSH_KNOWN_HOSTS='' "$1" kubeconfig --disable-telemetry -c - || echo ~~~FAIL; } |
jq --raw-input --slurp "$2"
EOT,
+ "--",
+ "k0sctl",
+ <<-EOT
if endswith("~~~FAIL\n") then
error("Failed to generate kubeconfig!")
else
{kubeconfig: .}
end
EOT,
]
+ query = {
+ "k0sctl_config" = (known after apply)
}
+ result = (known after apply)
}
# module.k0sctl.terraform_data.k0sctl_apply will be created
+ resource "terraform_data" "k0sctl_apply" {
+ id = (known after apply)
+ input = {
+ hosts = (known after apply)
+ k0sctl_config = {
+ apiVersion = "k0sctl.k0sproject.io/v1beta1"
+ kind = "Cluster"
+ metadata = {
+ name = "k0s-cluster"
}
+ spec = {
+ hosts = (known after apply)
+ k0s = {
+ config = {
+ spec = {
+ network = {
+ nodeLocalLoadBalancing = {
+ enabled = true
}
+ podCIDR = "10.244.0.0/16"
+ provider = "calico"
}
+ telemetry = {
+ enabled = false
}
}
}
+ version = "v1.27.4+k0s.0"
}
}
}
+ ssh_private_key_filename = (known after apply)
}
+ output = (known after apply)
+ triggers_replace = [
+ "k0sctl",
+ (known after apply),
]
}
# module.k0sctl.terraform_data.konnectivity_available will be created
+ resource "terraform_data" "konnectivity_available" {
+ id = (known after apply)
+ input = {
+ hosts = (known after apply)
+ k0sctl_config = (known after apply)
+ ssh_private_key_filename = (known after apply)
}
+ output = (known after apply)
+ triggers_replace = [
+ (known after apply),
+ "add72988d5a56f9000aa4e464822710b658e9bc891cb8fa3d9622462de9aa581",
]
}
Plan: 21 to add, 0 to change, 0 to destroy.
Changes to Outputs:
+ hosts = (known after apply)
+ k0s_kubeconfig = (sensitive value)
+ k0sctl_config = (known after apply)
+ ssh_private_key_filename = (known after apply)
Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.
Enter a value: yes
random_pet.resource_name_prefix[0]: Creating...
random_pet.resource_name_prefix[0]: Creation complete after 0s [id=on-lab]
module.infra.tls_private_key.ssh: Creating...
module.infra.tls_private_key.ssh: Creation complete after 0s [id=467f8df98206fd342d0e176673320523527334fa]
local_sensitive_file.ssh_private_key: Creating...
local_sensitive_file.ssh_private_key: Creation complete after 0s [id=a4826e1a3149b5c901a1e310a97054b0845fa45e]
module.infra.aws_key_pair.ssh: Creating...
module.infra.random_shuffle.selected_az: Creating...
module.infra.random_shuffle.selected_az: Creation complete after 0s [id=-]
module.infra.data.aws_subnet.default_for_selected_az: Reading...
module.infra.data.aws_subnet.default_for_selected_az: Read complete after 1s [id=subnet-81208bc9]
module.infra.aws_route_table_association.default_for_selected_az: Creating...
module.infra.aws_security_group.controller: Creating...
module.infra.aws_security_group.node: Creating...
module.infra.aws_key_pair.ssh: Creation complete after 1s [id=on-lab-ssh]
module.infra.aws_route_table_association.default_for_selected_az: Creation complete after 0s [id=rtbassoc-005e841127b92bca3]
module.infra.aws_security_group.node: Creation complete after 1s [id=sg-0452c290d495ae281]
module.infra.aws_security_group.controller: Creation complete after 1s [id=sg-00cf32fee8c7614c8]
module.infra.aws_security_group_rule.node_subnet_ingress: Creating...
module.infra.aws_security_group_rule.public_api_server: Creating...
module.infra.aws_security_group_rule.node_all_egress: Creating...
module.infra.aws_security_group_rule.node_additional_ingress[0]: Creating...
module.infra.aws_security_group_rule.node_public_ssh: Creating...
module.infra.aws_instance.nodes["controller-2"]: Creating...
module.infra.aws_instance.nodes["controller-1"]: Creating...
module.infra.aws_instance.nodes["worker-1"]: Creating...
module.infra.aws_instance.nodes["worker-2"]: Creating...
module.infra.aws_instance.nodes["controller-3"]: Creating...
module.infra.aws_security_group_rule.node_all_egress: Creation complete after 1s [id=sgrule-2104405259]
module.infra.aws_security_group_rule.public_api_server: Creation complete after 1s [id=sgrule-1912799280]
module.infra.aws_security_group_rule.node_subnet_ingress: Creation complete after 1s [id=sgrule-850981835]
module.infra.aws_security_group_rule.node_additional_ingress[0]: Creation complete after 2s [id=sgrule-2495047624]
module.infra.aws_security_group_rule.node_public_ssh: Creation complete after 3s [id=sgrule-98827554]
module.infra.aws_instance.nodes["worker-2"]: Still creating... [10s elapsed]
module.infra.aws_instance.nodes["controller-1"]: Still creating... [10s elapsed]
module.infra.aws_instance.nodes["controller-3"]: Still creating... [10s elapsed]
module.infra.aws_instance.nodes["controller-2"]: Still creating... [10s elapsed]
module.infra.aws_instance.nodes["worker-1"]: Still creating... [10s elapsed]
module.infra.aws_instance.nodes["controller-1"]: Creation complete after 13s [id=i-0df613a0ab54dd623]
module.infra.aws_instance.nodes["controller-2"]: Creation complete after 13s [id=i-0cd6c5f726975858a]
module.infra.aws_instance.nodes["controller-3"]: Creation complete after 13s [id=i-05f4bbf8a01c85446]
module.infra.aws_instance.nodes["worker-2"]: Creation complete after 13s [id=i-02844a18c8e858fa0]
module.infra.aws_instance.nodes["worker-1"]: Creation complete after 13s [id=i-0581cc19644819807]
module.infra.terraform_data.provisioned_nodes: Creating...
module.infra.terraform_data.provisioned_nodes: Creation complete after 0s [id=cd84f212-8ea9-c8a6-43f1-a4bc6ff22984]
module.k0sctl.terraform_data.k0sctl_apply: Creating...
module.k0sctl.terraform_data.k0sctl_apply: Provisioning with 'local-exec'...
module.k0sctl.terraform_data.k0sctl_apply (local-exec): Executing: ["/bin/sh" "-c" "printf %s \"$K0SCTL_CONFIG\" | env -u SSH_AUTH_SOCK SSH_KNOWN_HOSTS='' \"$K0SCTL_EXECUTABLE_PATH\" apply --disable-telemetry --disable-upgrade-check -c -\n"]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): ⠀⣿⣿⡇⠀⠀⢀⣴⣾⣿⠟⠁⢸⣿⣿⣿⣿⣿⣿⣿⡿⠛⠁⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀█████████ █████████ ███
module.k0sctl.terraform_data.k0sctl_apply (local-exec): ⠀⣿⣿⡇⣠⣶⣿⡿⠋⠀⠀⠀⢸⣿⡇⠀⠀⠀⣠⠀⠀⢀⣠⡆⢸⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀███ ███ ███
module.k0sctl.terraform_data.k0sctl_apply (local-exec): ⠀⣿⣿⣿⣿⣟⠋⠀⠀⠀⠀⠀⢸⣿⡇⠀⢰⣾⣿⠀⠀⣿⣿⡇⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀███ ███ ███
module.k0sctl.terraform_data.k0sctl_apply (local-exec): ⠀⣿⣿⡏⠻⣿⣷⣤⡀⠀⠀⠀⠸⠛⠁⠀⠸⠋⠁⠀⠀⣿⣿⡇⠈⠉⠉⠉⠉⠉⠉⠉⠉⢹⣿⣿⠀███ ███ ███
module.k0sctl.terraform_data.k0sctl_apply (local-exec): ⠀⣿⣿⡇⠀⠀⠙⢿⣿⣦⣀⠀⠀⠀⣠⣶⣶⣶⣶⣶⣶⣿⣿⡇⢰⣶⣶⣶⣶⣶⣶⣶⣶⣾⣿⣿⠀█████████ ███ ██████████
module.k0sctl.terraform_data.k0sctl_apply (local-exec): k0sctl v0.15.4 Copyright 2023, k0sctl authors.
module.k0sctl.terraform_data.k0sctl_apply (local-exec): By continuing to use k0sctl you agree to these terms:
module.k0sctl.terraform_data.k0sctl_apply (local-exec): https://k0sproject.io/licenses/eula
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Connect to hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.250.107.162: attempt 1 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.250.107.162:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.199.97.175: attempt 1 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.199.97.175:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.238.146.34: attempt 1 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.238.146.34:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 52.195.216.31: attempt 1 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 52.195.216.31:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 13.230.139.181: attempt 1 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 13.230.139.181:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.250.107.162: attempt 2 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.250.107.162:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.199.97.175: attempt 2 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.199.97.175:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 52.195.216.31: attempt 2 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 52.195.216.31:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.238.146.34: attempt 2 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.238.146.34:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 13.230.139.181: attempt 2 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 13.230.139.181:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.250.107.162: attempt 3 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.250.107.162:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.199.97.175: attempt 3 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.199.97.175:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 54.238.146.34: attempt 3 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 54.238.146.34:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [10s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 52.195.216.31: attempt 3 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 52.195.216.31:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=error msg="[SSH] 13.230.139.181: attempt 3 of 60.. failed to connect: not connected: client connect: ssh dial: dial tcp 13.230.139.181:22: connect: connection refused"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: connected"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: connected"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: connected"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: connected"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: connected"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Detect host operating systems"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: is running Debian GNU/Linux 12 (bookworm)"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: is running Debian GNU/Linux 12 (bookworm)"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: is running Debian GNU/Linux 12 (bookworm)"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: is running Debian GNU/Linux 12 (bookworm)"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: is running Debian GNU/Linux 12 (bookworm)"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Acquire exclusive host lock"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Prepare hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Gather host facts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: using ip-172-31-37-251 as hostname"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: using ip-172-31-42-209 as hostname"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: using ip-172-31-45-93 as hostname"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: using ip-172-31-33-110 as hostname"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: using ip-172-31-34-151 as hostname"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: discovered ens5 as private interface"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: discovered ens5 as private interface"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: discovered ens5 as private interface"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: discovered ens5 as private interface"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: discovered ens5 as private interface"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: discovered 172.31.37.251 as private address"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: discovered 172.31.42.209 as private address"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: discovered 172.31.34.151 as private address"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: discovered 172.31.33.110 as private address"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: discovered 172.31.45.93 as private address"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Validate hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Gather k0s facts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Validate facts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Download k0s on hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: downloading k0s v1.27.4+k0s.0"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: downloading k0s v1.27.4+k0s.0"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: downloading k0s v1.27.4+k0s.0"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: downloading k0s v1.27.4+k0s.0"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: downloading k0s v1.27.4+k0s.0"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [20s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Install k0s binaries on hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Configure k0s"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: validating configuration"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: validating configuration"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: validating configuration"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: configuration was changed"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: configuration was changed"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: configuration was changed"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Initialize the k0s cluster"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: installing k0s controller"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: waiting for the k0s service to start"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [30s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [40s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: waiting for kubernetes api to respond"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Install controllers"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: generating token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: writing join token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: installing k0s controller"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: starting service"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: waiting for the k0s service to start"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [50s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m0s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.238.146.34:22: waiting for kubernetes api to respond"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: generating token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: writing join token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: installing k0s controller"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: starting service"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: waiting for the k0s service to start"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m10s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m20s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 52.195.216.31:22: waiting for kubernetes api to respond"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Install workers"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: validating api connection to https://172.31.33.110:6443"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: validating api connection to https://172.31.33.110:6443"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 13.230.139.181:22: generating token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: writing join token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: writing join token"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: installing k0s worker"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: installing k0s worker"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: starting service"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: starting service"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.199.97.175:22: waiting for node to become ready"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="[ssh] 54.250.107.162:22: waiting for node to become ready"
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m30s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m40s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [1m50s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [2m0s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [2m10s elapsed]
module.k0sctl.terraform_data.k0sctl_apply: Still creating... [2m20s elapsed]
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Release exclusive host lock"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Running phase: Disconnect from hosts"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="==> Finished in 2m29s"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="k0s cluster version v1.27.4+k0s.0 is now installed"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg="Tip: To access the cluster you can now fetch the admin kubeconfig using:"
module.k0sctl.terraform_data.k0sctl_apply (local-exec): level=info msg=" k0sctl kubeconfig"
module.k0sctl.terraform_data.k0sctl_apply: Creation complete after 2m30s [id=0cedcf6e-e53d-1595-7c3e-fc7374a4da55]
module.k0sctl.data.external.k0s_kubeconfig: Reading...
module.k0sctl.terraform_data.konnectivity_available: Creating...
module.k0sctl.terraform_data.konnectivity_available: Provisioning with 'remote-exec'...
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Connecting to remote host via SSH...
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Host: 13.230.139.181
module.k0sctl.terraform_data.konnectivity_available (remote-exec): User: admin
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Password: false
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Private key: true
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Certificate: false
module.k0sctl.terraform_data.konnectivity_available (remote-exec): SSH Agent: false
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Checking Host Key: false
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Target Platform: unix
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Connected!
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Expecting 2 pods with 3 controller connections each ...
module.k0sctl.data.external.k0s_kubeconfig: Read complete after 0s [id=-]
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Pod: pod/konnectivity-agent-64d8r, seen controllers: 3
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Pod: pod/konnectivity-agent-xjxs5, seen controllers: 3
module.k0sctl.terraform_data.konnectivity_available (remote-exec): Seen pods with expected amount of controller connections: 2
module.k0sctl.terraform_data.konnectivity_available: Creation complete after 1s [id=d82c5cd7-5281-3853-755f-3986fe1d9f41]
Apply complete! Resources: 21 added, 0 changed, 0 destroyed.
Outputs:
hosts = tolist([
{
"connection" = {
"type" = "ssh"
"username" = "admin"
}
"ipv4" = "13.230.139.181"
"is_controller" = true
"is_worker" = false
"name" = "on-lab-controller-1"
"role" = "controller"
},
{
"connection" = {
"type" = "ssh"
"username" = "admin"
}
"ipv4" = "54.238.146.34"
"is_controller" = true
"is_worker" = false
"name" = "on-lab-controller-2"
"role" = "controller"
},
{
"connection" = {
"type" = "ssh"
"username" = "admin"
}
"ipv4" = "52.195.216.31"
"is_controller" = true
"is_worker" = false
"name" = "on-lab-controller-3"
"role" = "controller"
},
{
"connection" = {
"type" = "ssh"
"username" = "admin"
}
"ipv4" = "54.199.97.175"
"is_controller" = false
"is_worker" = true
"name" = "on-lab-worker-1"
"role" = "worker"
},
{
"connection" = {
"type" = "ssh"
"username" = "admin"
}
"ipv4" = "54.250.107.162"
"is_controller" = false
"is_worker" = true
"name" = "on-lab-worker-2"
"role" = "worker"
},
])
k0s_kubeconfig = <sensitive>
k0sctl_config = {
"apiVersion" = "k0sctl.k0sproject.io/v1beta1"
"kind" = "Cluster"
"metadata" = {
"name" = "k0s-cluster"
}
"spec" = {
"hosts" = [
{
"role" = "controller"
"ssh" = {
"address" = "13.230.139.181"
"keyPath" = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
"port" = 22
"user" = "admin"
}
"uploadBinary" = false
},
{
"role" = "controller"
"ssh" = {
"address" = "54.238.146.34"
"keyPath" = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
"port" = 22
"user" = "admin"
}
"uploadBinary" = false
},
{
"role" = "controller"
"ssh" = {
"address" = "52.195.216.31"
"keyPath" = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
"port" = 22
"user" = "admin"
}
"uploadBinary" = false
},
{
"role" = "worker"
"ssh" = {
"address" = "54.199.97.175"
"keyPath" = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
"port" = 22
"user" = "admin"
}
"uploadBinary" = false
},
{
"role" = "worker"
"ssh" = {
"address" = "54.250.107.162"
"keyPath" = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
"port" = 22
"user" = "admin"
}
"uploadBinary" = false
},
]
"k0s" = {
"config" = {
"spec" = {
"network" = {
"nodeLocalLoadBalancing" = {
"enabled" = true
}
"podCIDR" = "10.244.0.0/16"
"provider" = "calico"
}
"telemetry" = {
"enabled" = false
}
}
}
"version" = "v1.27.4+k0s.0"
}
}
}
ssh_private_key_filename = "/Users/yoshimin/.cache/k0s-ostests/aws-on-lab-ssh-private-key.pem"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment