Skip to content

Instantly share code, notes, and snippets.

@kesor
Created September 1, 2020 12:09
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save kesor/953d33ce28f1563e55072e4eb91bafe2 to your computer and use it in GitHub Desktop.
kops cluster update duplicate IAM roles
I0901 15:06:00.194412 65709 factory.go:68] state store s3://mybucket.domain.com/mycluster
I0901 15:06:00.194584 65709 s3context.go:337] GOOS="darwin", assuming not running on EC2
I0901 15:06:00.194592 65709 s3context.go:170] defaulting region to "us-east-1"
I0901 15:06:00.987020 65709 s3context.go:210] found bucket in region "us-east-1"
I0901 15:06:00.987048 65709 s3fs.go:284] Reading file "s3://mybucket.domain.com/mycluster/mycluster.domain.com/config"
I0901 15:06:01.770731 65709 s3fs.go:321] Listing objects in S3 bucket "mybucket.domain.com" with prefix "mycluster/mycluster.domain.com/instancegroup/"
I0901 15:06:01.978948 65709 s3fs.go:349] Listed files in s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup: [s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/master-us-east-1a s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/nodes s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/second]
I0901 15:06:01.978996 65709 s3fs.go:284] Reading file "s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/master-us-east-1a"
I0901 15:06:02.175711 65709 s3fs.go:284] Reading file "s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/nodes"
I0901 15:06:02.354270 65709 s3fs.go:284] Reading file "s3://mybucket.domain.com/mycluster/mycluster.domain.com/instancegroup/second"
I0901 15:06:02.524872 65709 channel.go:99] resolving "stable" against default channel location "https://raw.githubusercontent.com/kubernetes/kops/master/channels/"
I0901 15:06:02.524888 65709 channel.go:104] Loading channel from "https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable"
I0901 15:06:02.524901 65709 context.go:179] Performing HTTP request: GET https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable
I0901 15:06:03.014899 65709 channel.go:113] Channel contents: spec:
images:
# We put the "legacy" version first, for kops versions that don't support versions ( < 1.5.0 )
- name: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2017-07-28
providerID: aws
kubernetesVersion: ">=1.4.0 <1.5.0"
- name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.5.0 <1.6.0"
- name: kope.io/k8s-1.6-debian-jessie-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.6.0 <1.7.0"
- name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.7.0 <1.8.0"
- name: kope.io/k8s-1.8-debian-stretch-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.8.0 <1.9.0"
- name: kope.io/k8s-1.9-debian-stretch-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.9.0 <1.10.0"
- name: kope.io/k8s-1.10-debian-stretch-amd64-hvm-ebs-2018-08-17
providerID: aws
kubernetesVersion: ">=1.10.0 <1.11.0"
# Stretch is the default for 1.11 (for nvme)
- name: kope.io/k8s-1.11-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.11.0 <1.12.0"
- name: kope.io/k8s-1.12-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.12.0 <1.13.0"
- name: kope.io/k8s-1.13-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.13.0 <1.14.0"
- name: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.14.0 <1.15.0"
- name: kope.io/k8s-1.15-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.15.0 <1.16.0"
- name: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.16.0 <1.17.0"
- name: kope.io/k8s-1.17-debian-stretch-amd64-hvm-ebs-2020-07-20
providerID: aws
kubernetesVersion: ">=1.17.0 <1.18.0"
- name: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200716
providerID: aws
kubernetesVersion: ">=1.18.0"
- providerID: gce
kubernetesVersion: "<1.16.0-alpha.1"
name: "cos-cloud/cos-stable-65-10323-99-0"
- providerID: gce
kubernetesVersion: ">=1.16.0-alpha.1"
name: "cos-cloud/cos-stable-77-12371-114-0"
cluster:
kubernetesVersion: v1.5.8
networking:
kubenet: {}
kubernetesVersions:
- range: ">=1.18.0"
recommendedVersion: 1.18.8
requiredVersion: 1.18.0
- range: ">=1.17.0"
recommendedVersion: 1.17.11
requiredVersion: 1.17.0
- range: ">=1.16.0"
recommendedVersion: 1.16.14
requiredVersion: 1.16.0
- range: ">=1.15.0"
recommendedVersion: 1.15.12
requiredVersion: 1.15.0
- range: ">=1.14.0"
recommendedVersion: 1.14.10
requiredVersion: 1.14.0
- range: ">=1.13.0"
recommendedVersion: 1.13.12
requiredVersion: 1.13.0
- range: ">=1.12.0"
recommendedVersion: 1.12.10
requiredVersion: 1.12.0
- range: ">=1.11.0"
recommendedVersion: 1.11.10
requiredVersion: 1.11.0
- range: "<1.11.0"
recommendedVersion: 1.11.10
requiredVersion: 1.11.10
kopsVersions:
- range: ">=1.19.0-alpha.1"
#recommendedVersion: "1.19.0-alpha.1"
#requiredVersion: 1.19.0
kubernetesVersion: 1.19.0-rc.1
- range: ">=1.18.0-alpha.1"
recommendedVersion: "1.18.0"
#requiredVersion: 1.18.0
kubernetesVersion: 1.18.8
- range: ">=1.17.0-alpha.1"
recommendedVersion: "1.17.1"
#requiredVersion: 1.17.0
kubernetesVersion: 1.17.11
- range: ">=1.16.0-alpha.1"
recommendedVersion: "1.16.4"
#requiredVersion: 1.16.0
kubernetesVersion: 1.16.14
- range: ">=1.15.0-alpha.1"
recommendedVersion: "1.15.3"
#requiredVersion: 1.15.0
kubernetesVersion: 1.15.12
- range: ">=1.14.0-alpha.1"
#recommendedVersion: "1.14.0"
#requiredVersion: 1.14.0
kubernetesVersion: 1.14.10
- range: ">=1.13.0-alpha.1"
#recommendedVersion: "1.13.0"
#requiredVersion: 1.13.0
kubernetesVersion: 1.13.12
- range: ">=1.12.0-alpha.1"
recommendedVersion: "1.12.1"
#requiredVersion: 1.12.0
kubernetesVersion: 1.12.10
- range: ">=1.11.0-alpha.1"
recommendedVersion: "1.11.1"
#requiredVersion: 1.11.0
kubernetesVersion: 1.11.10
- range: "<1.11.0-alpha.1"
recommendedVersion: "1.11.1"
#requiredVersion: 1.10.0
kubernetesVersion: 1.11.10
I0901 15:06:03.015419 65709 populate_cluster_spec.go:371] Defaulted KubeControllerManager.ClusterCIDR to 100.96.0.0/11
I0901 15:06:03.015434 65709 populate_cluster_spec.go:378] Defaulted ServiceClusterIPRange to 100.64.0.0/13
I0901 15:06:03.017371 65709 aws_cloud.go:1246] Querying EC2 for all valid zones in region "us-east-1"
I0901 15:06:03.017624 65709 request_logger.go:45] AWS request: ec2/DescribeAvailabilityZones
I0901 15:06:03.935291 65709 subnets.go:49] All subnets have CIDRs; skipping assignment logic
I0901 15:06:03.935322 65709 defaults.go:224] Not setting up Proxy Excludes
I0901 15:06:03.935380 65709 aws_cloud.go:1246] Querying EC2 for all valid zones in region "us-east-1"
I0901 15:06:03.935512 65709 request_logger.go:45] AWS request: ec2/DescribeAvailabilityZones
I0901 15:06:04.142290 65709 tagbuilder.go:95] tags: [_aws _k8s_1_6]
I0901 15:06:04.142509 65709 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder
I0901 15:06:04.142522 65709 options_loader.go:130] executing builder *components.EtcdOptionsBuilder
I0901 15:06:04.142538 65709 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder
I0901 15:06:04.142553 65709 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder
I0901 15:06:04.142562 65709 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder
I0901 15:06:04.142621 65709 options_loader.go:130] executing builder *components.DockerOptionsBuilder
I0901 15:06:04.142631 65709 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder
I0901 15:06:04.142638 65709 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder
I0901 15:06:04.142650 65709 options_loader.go:130] executing builder *components.KubeletOptionsBuilder
I0901 15:06:04.142670 65709 kubelet.go:171] Cloud Provider: aws
I0901 15:06:04.142688 65709 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder
I0901 15:06:04.142700 65709 kubecontrollermanager.go:74] Kubernetes version "1.17.11" supports AttachDetachReconcileSyncPeriod; will configure
I0901 15:06:04.142713 65709 kubecontrollermanager.go:79] AttachDetachReconcileSyncPeriod is not set; will set to default 1m0s
I0901 15:06:04.142732 65709 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder
I0901 15:06:04.142748 65709 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder
I0901 15:06:04.143637 65709 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder
I0901 15:06:04.143668 65709 options_loader.go:130] executing builder *components.EtcdOptionsBuilder
I0901 15:06:04.143695 65709 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder
I0901 15:06:04.143718 65709 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder
I0901 15:06:04.143737 65709 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder
I0901 15:06:04.143806 65709 options_loader.go:130] executing builder *components.DockerOptionsBuilder
I0901 15:06:04.143820 65709 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder
I0901 15:06:04.143829 65709 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder
I0901 15:06:04.143837 65709 options_loader.go:130] executing builder *components.KubeletOptionsBuilder
I0901 15:06:04.143850 65709 kubelet.go:171] Cloud Provider: aws
I0901 15:06:04.143881 65709 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder
I0901 15:06:04.143894 65709 kubecontrollermanager.go:74] Kubernetes version "1.17.11" supports AttachDetachReconcileSyncPeriod; will configure
I0901 15:06:04.143939 65709 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder
I0901 15:06:04.143949 65709 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder
I0901 15:06:04.144910 65709 spec_builder.go:49] options: {
"channel": "stable",
"configBase": "s3://mybucket.domain.com/mycluster/mycluster.domain.com",
"cloudProvider": "aws",
"kubernetesVersion": "1.17.11",
"subnets": [
{
"name": "us-east-1a",
"cidr": "172.31.2.0/27",
"zone": "us-east-1a",
"egress": "nat-XXXXXXXXXXXXXXXXX",
"type": "Private"
},
{
"name": "utility-us-east-1a",
"cidr": "172.31.1.0/27",
"zone": "us-east-1a",
"type": "Utility"
}
],
"masterPublicName": "api.mycluster.domain.com",
"masterInternalName": "api.internal.mycluster.domain.com",
"networkCIDR": "172.31.0.0/16",
"networkID": "vpc-XXXXXXXX",
"topology": {
"masters": "private",
"nodes": "private",
"dns": {
"type": "Private"
}
},
"secretStore": "s3://mybucket.domain.com/mycluster/mycluster.domain.com/secrets",
"keyStore": "s3://mybucket.domain.com/mycluster/mycluster.domain.com/pki",
"configStore": "s3://mybucket.domain.com/mycluster/mycluster.domain.com",
"dnsZone": "domain.com",
"clusterDNSDomain": "cluster.local",
"serviceClusterIPRange": "100.64.0.0/13",
"nonMasqueradeCIDR": "100.64.0.0/10",
"kubernetesApiAccess": [
"0.0.0.0/0"
],
"additionalPolicies": {
"master": "[\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"dynamodb:List*\"],\n \"Resource\": [\"*\"]\n }\n]\n",
"node": "[\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"dynamodb:List*\"],\n \"Resource\": [\"*\"]\n }\n]\n"
},
"etcdClusters": [
{
"name": "main",
"provider": "Manager",
"etcdMembers": [
{
"name": "a",
"instanceGroup": "master-us-east-1a"
}
],
"enableEtcdTLS": true,
"enableTLSAuth": true,
"version": "3.4.3",
"backups": {
"backupStore": "s3://mybucket.domain.com/mycluster/mycluster.domain.com/backups/etcd/main"
},
"manager": {},
"memoryRequest": "100Mi",
"cpuRequest": "200m"
},
{
"name": "events",
"provider": "Manager",
"etcdMembers": [
{
"name": "a",
"instanceGroup": "master-us-east-1a"
}
],
"enableEtcdTLS": true,
"enableTLSAuth": true,
"version": "3.4.3",
"backups": {
"backupStore": "s3://mybucket.domain.com/mycluster/mycluster.domain.com/backups/etcd/events"
},
"manager": {},
"memoryRequest": "100Mi",
"cpuRequest": "100m"
}
],
"docker": {
"ipMasq": false,
"ipTables": false,
"logDriver": "json-file",
"logLevel": "warn",
"logOpt": [
"max-size=10m",
"max-file=5"
],
"storage": "overlay2,overlay,aufs",
"version": "19.03.11"
},
"kubeDNS": {
"cacheMaxSize": 1000,
"cacheMaxConcurrent": 150,
"domain": "cluster.local",
"replicas": 2,
"serverIP": "100.64.0.10",
"memoryRequest": "70Mi",
"cpuRequest": "100m",
"memoryLimit": "170Mi"
},
"kubeAPIServer": {
"image": "kube-apiserver:v1.17.11",
"logLevel": 2,
"cloudProvider": "aws",
"securePort": 443,
"bindAddress": "0.0.0.0",
"insecureBindAddress": "127.0.0.1",
"enableAdmissionPlugins": [
"NamespaceLifecycle",
"LimitRanger",
"ServiceAccount",
"PersistentVolumeLabel",
"DefaultStorageClass",
"DefaultTolerationSeconds",
"MutatingAdmissionWebhook",
"ValidatingAdmissionWebhook",
"NodeRestriction",
"ResourceQuota"
],
"serviceClusterIPRange": "100.64.0.0/13",
"etcdServers": [
"http://127.0.0.1:4001"
],
"etcdServersOverrides": [
"/events#http://127.0.0.1:4002"
],
"allowPrivileged": true,
"apiServerCount": 1,
"anonymousAuth": false,
"kubeletPreferredAddressTypes": [
"InternalIP",
"Hostname",
"ExternalIP"
],
"storageBackend": "etcd3",
"authorizationMode": "RBAC",
"requestheaderUsernameHeaders": [
"X-Remote-User"
],
"requestheaderGroupHeaders": [
"X-Remote-Group"
],
"requestheaderExtraHeaderPrefixes": [
"X-Remote-Extra-"
],
"requestheaderAllowedNames": [
"aggregator"
]
},
"kubeControllerManager": {
"logLevel": 2,
"image": "kube-controller-manager:v1.17.11",
"cloudProvider": "aws",
"clusterName": "mycluster.domain.com",
"clusterCIDR": "100.96.0.0/11",
"allocateNodeCIDRs": true,
"configureCloudRoutes": false,
"leaderElection": {
"leaderElect": true
},
"attachDetachReconcileSyncPeriod": "1m0s",
"useServiceAccountCredentials": true
},
"kubeScheduler": {
"logLevel": 2,
"image": "kube-scheduler:v1.17.11",
"leaderElection": {
"leaderElect": true
}
},
"kubeProxy": {
"image": "kube-proxy:v1.17.11",
"cpuRequest": "100m",
"logLevel": 2,
"clusterCIDR": "100.96.0.0/11",
"hostnameOverride": "@aws"
},
"kubelet": {
"anonymousAuth": false,
"kubeconfigPath": "/var/lib/kubelet/kubeconfig",
"logLevel": 2,
"podManifestPath": "/etc/kubernetes/manifests",
"hostnameOverride": "@aws",
"podInfraContainerImage": "pause-amd64:3.0",
"enableDebuggingHandlers": true,
"clusterDomain": "cluster.local",
"clusterDNS": "100.64.0.10",
"networkPluginName": "cni",
"cloudProvider": "aws",
"cgroupRoot": "/",
"nonMasqueradeCIDR": "100.64.0.0/10",
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%"
},
"masterKubelet": {
"anonymousAuth": false,
"kubeconfigPath": "/var/lib/kubelet/kubeconfig",
"logLevel": 2,
"podManifestPath": "/etc/kubernetes/manifests",
"hostnameOverride": "@aws",
"podInfraContainerImage": "pause-amd64:3.0",
"enableDebuggingHandlers": true,
"clusterDomain": "cluster.local",
"clusterDNS": "100.64.0.10",
"networkPluginName": "cni",
"cloudProvider": "aws",
"cgroupRoot": "/",
"registerSchedulable": false,
"nonMasqueradeCIDR": "100.64.0.0/10",
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%"
},
"networking": {
"canal": {}
},
"api": {
"loadBalancer": {
"type": "Public"
}
},
"authorization": {
"rbac": {}
},
"cloudLabels": {
"k8sClusterName": "mycluster"
},
"iam": {
"legacy": false,
"allowContainerRegistry": true
}
}
I0901 15:06:04.145258 65709 channel.go:237] version range ">=1.19.0-alpha.1" does not apply to version "1.17.1"; skipping
I0901 15:06:04.145269 65709 channel.go:237] version range ">=1.18.0-alpha.1" does not apply to version "1.17.1"; skipping
I0901 15:06:04.145279 65709 channel.go:163] RecommendedVersion="1.17.1", Have="1.17.1". No upgrade needed.
I0901 15:06:04.145283 65709 channel.go:189] VersionRecommendationSpec does not specify RequiredVersion
I0901 15:06:04.145289 65709 channel.go:216] version range ">=1.18.0" does not apply to version "1.17.11"; skipping
I0901 15:06:04.145295 65709 channel.go:144] RecommendedVersion="1.17.11", Have="1.17.11". No upgrade needed.
I0901 15:06:04.145300 65709 channel.go:182] RequiredVersion="1.17.0", Have="1.17.11". No upgrade needed.
I0901 15:06:04.145376 65709 context.go:179] Performing HTTP request: GET https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubelet.sha256
I0901 15:06:04.787724 65709 builder.go:355] Found hash "71bcc8443a6e6f226727ea007fdc4b96327e302d1724a15ce2bffc2d94a3dac6" for "https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubelet"
I0901 15:06:04.787754 65709 builder.go:272] adding file: &{DownloadURL:https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubelet CanonicalURL:<nil> SHAValue:71bcc8443a6e6f226727ea007fdc4b96327e302d1724a15ce2bffc2d94a3dac6}
I0901 15:06:04.787796 65709 context.go:179] Performing HTTP request: GET https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubectl.sha256
I0901 15:06:04.884064 65709 builder.go:355] Found hash "002d640a12d6fad48f7d8a5c56cb4ff656397caf3511f50426d9c69c94b2d137" for "https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubectl"
I0901 15:06:04.884109 65709 builder.go:272] adding file: &{DownloadURL:https://storage.googleapis.com/kubernetes-release/release/v1.17.11/bin/linux/amd64/kubectl CanonicalURL:<nil> SHAValue:002d640a12d6fad48f7d8a5c56cb4ff656397caf3511f50426d9c69c94b2d137}
I0901 15:06:04.884191 65709 networking.go:177] Adding default CNI asset for k8s >= 1.15: https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
I0901 15:06:04.884232 65709 urls.go:81] Using default base url: "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/"
I0901 15:06:04.884259 65709 context.go:179] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/utils.tar.gz.sha256
I0901 15:06:05.644015 65709 builder.go:355] Found hash "320dbc1ff22520e5451c51625d071413f0fdf24ba289f9bf9a3b043fd77fc340" for "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/utils.tar.gz"
I0901 15:06:05.644084 65709 builder.go:272] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/utils.tar.gz CanonicalURL:<nil> SHAValue:320dbc1ff22520e5451c51625d071413f0fdf24ba289f9bf9a3b043fd77fc340}
I0901 15:06:05.644153 65709 urls.go:73] Using cached kopsBaseUrl url: "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/"
I0901 15:06:05.644170 65709 context.go:179] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/nodeup.sha256
I0901 15:06:05.808828 65709 builder.go:355] Found hash "f5d67c367b5b2bfa17689c9431c6409efbac50982760df51d377aa1da8c83d9c" for "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/nodeup"
I0901 15:06:05.808854 65709 builder.go:272] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/nodeup CanonicalURL:<nil> SHAValue:f5d67c367b5b2bfa17689c9431c6409efbac50982760df51d377aa1da8c83d9c}
I0901 15:06:05.808883 65709 urls.go:137] Using default nodeup location: "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/linux/amd64/nodeup"
I0901 15:06:05.808896 65709 urls.go:73] Using cached kopsBaseUrl url: "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/"
I0901 15:06:05.808909 65709 context.go:179] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.17.1/images/protokube.tar.gz.sha256
I0901 15:06:05.971752 65709 builder.go:355] Found hash "77a7975107bc222926f9d93f5b5e1cea74b70401727b355e6242eb94be0526f4" for "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/images/protokube.tar.gz"
I0901 15:06:05.971805 65709 builder.go:272] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.17.1/images/protokube.tar.gz CanonicalURL:<nil> SHAValue:77a7975107bc222926f9d93f5b5e1cea74b70401727b355e6242eb94be0526f4}
I0901 15:06:05.971881 65709 urls.go:178] Using default protokube location: "https://kubeupv2.s3.amazonaws.com/kops/1.17.1/images/protokube.tar.gz"
I0901 15:06:05.971958 65709 aws_cloud.go:1246] Querying EC2 for all valid zones in region "us-east-1"
I0901 15:06:05.972129 65709 request_logger.go:45] AWS request: ec2/DescribeAvailabilityZones
I0901 15:06:06.180299 65709 s3fs.go:321] Listing objects in S3 bucket "mybucket.domain.com" with prefix "mycluster/mycluster.domain.com/pki/ssh/public/admin/"
I0901 15:06:06.374699 65709 s3fs.go:349] Listed files in s3://mybucket.domain.com/mycluster/mycluster.domain.com/pki/ssh/public/admin: [s3://mybucket.domain.com/mycluster/mycluster.domain.com/pki/ssh/public/admin/79e5cabce79e543d10ef3f64e38eb207]
I0901 15:06:06.374719 65709 s3fs.go:284] Reading file "s3://mybucket.domain.com/mycluster/mycluster.domain.com/pki/ssh/public/admin/79e5cabce79e543d10ef3f64e38eb207"
I0901 15:06:06.548908 65709 dns.go:94] Private DNS: skipping DNS validation
I0901 15:06:06.548926 65709 tagbuilder.go:95] tags: [_aws _k8s_1_6]
I0901 15:06:06.549215 65709 templates.go:80] loading (templated) resource "addons/networking.cilium.io/k8s-1.7.yaml"
I0901 15:06:06.549379 65709 templates.go:80] loading (templated) resource "addons/networking.cilium.io/k8s-1.12.yaml"
I0901 15:06:06.549470 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml"
I0901 15:06:06.549618 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.12.yaml"
I0901 15:06:06.549820 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.15.yaml"
I0901 15:06:06.550001 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.16.yaml"
I0901 15:06:06.550090 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.6.yaml"
I0901 15:06:06.550196 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.8.yaml"
I0901 15:06:06.550313 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.9.yaml"
I0901 15:06:06.550349 65709 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml"
I0901 15:06:06.550376 65709 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.550406 65709 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml"
I0901 15:06:06.550430 65709 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
I0901 15:06:06.550450 65709 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.550471 65709 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.550528 65709 templates.go:88] loading resource "addons/authentication.kope.io/k8s-1.12.yaml"
I0901 15:06:06.550563 65709 templates.go:88] loading resource "addons/authentication.kope.io/k8s-1.8.yaml"
I0901 15:06:06.550602 65709 templates.go:80] loading (templated) resource "addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml"
I0901 15:06:06.550622 65709 templates.go:88] loading resource "addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
I0901 15:06:06.550640 65709 templates.go:88] loading resource "addons/limit-range.addons.k8s.io/v1.5.0.yaml"
I0901 15:06:06.550658 65709 templates.go:88] loading resource "addons/limit-range.addons.k8s.io/addon.yaml"
I0901 15:06:06.550673 65709 templates.go:88] loading resource "addons/core.addons.k8s.io/addon.yaml"
I0901 15:06:06.550711 65709 templates.go:80] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.550750 65709 templates.go:80] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.7.yaml"
I0901 15:06:06.550769 65709 templates.go:88] loading resource "addons/core.addons.k8s.io/v1.4.0.yaml"
I0901 15:06:06.550821 65709 templates.go:80] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml"
I0901 15:06:06.550873 65709 templates.go:80] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.550971 65709 templates.go:80] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.551049 65709 templates.go:80] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.551090 65709 templates.go:88] loading resource "addons/rbac.addons.k8s.io/k8s-1.8.yaml"
I0901 15:06:06.551121 65709 templates.go:88] loading resource "addons/scheduler.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.551173 65709 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.551210 65709 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.551247 65709 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.551305 65709 templates.go:80] loading (templated) resource "addons/networking.kuberouter/k8s-1.12.yaml"
I0901 15:06:06.551351 65709 templates.go:80] loading (templated) resource "addons/networking.kuberouter/k8s-1.6.yaml"
I0901 15:06:06.551390 65709 templates.go:80] loading (templated) resource "addons/authentication.aws/k8s-1.10.yaml"
I0901 15:06:06.551430 65709 templates.go:80] loading (templated) resource "addons/authentication.aws/k8s-1.12.yaml"
I0901 15:06:06.551469 65709 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml"
I0901 15:06:06.551515 65709 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml"
I0901 15:06:06.551563 65709 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml"
I0901 15:06:06.551620 65709 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml"
I0901 15:06:06.551677 65709 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml"
I0901 15:06:06.551703 65709 templates.go:88] loading resource "addons/openstack.addons.k8s.io/BUILD.bazel"
I0901 15:06:06.551754 65709 templates.go:80] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.11.yaml"
I0901 15:06:06.551801 65709 templates.go:80] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.13.yaml"
I0901 15:06:06.551858 65709 templates.go:80] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml"
I0901 15:06:06.551907 65709 templates.go:80] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml"
I0901 15:06:06.551956 65709 templates.go:80] loading (templated) resource "addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
I0901 15:06:06.551996 65709 templates.go:80] loading (templated) resource "addons/networking.flannel/pre-k8s-1.6.yaml"
I0901 15:06:06.552040 65709 templates.go:80] loading (templated) resource "addons/networking.flannel/k8s-1.12.yaml"
I0901 15:06:06.552080 65709 templates.go:80] loading (templated) resource "addons/networking.flannel/k8s-1.6.yaml"
I0901 15:06:06.552268 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.12.yaml"
I0901 15:06:06.552491 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.16.yaml"
I0901 15:06:06.552630 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.6.yaml"
I0901 15:06:06.552772 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7-v3.yaml"
I0901 15:06:06.552901 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7.yaml"
I0901 15:06:06.552989 65709 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/pre-k8s-1.6.yaml"
I0901 15:06:06.553213 65709 templates.go:88] loading resource "addons/networking.kope.io/k8s-1.12.yaml"
I0901 15:06:06.553304 65709 templates.go:88] loading resource "addons/networking.kope.io/k8s-1.6.yaml"
I0901 15:06:06.553342 65709 templates.go:88] loading resource "addons/networking.kope.io/pre-k8s-1.6.yaml"
I0901 15:06:06.553439 65709 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.12.yaml"
I0901 15:06:06.553527 65709 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.6.yaml"
I0901 15:06:06.553634 65709 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.7.yaml"
I0901 15:06:06.553737 65709 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.8.yaml"
I0901 15:06:06.553808 65709 templates.go:80] loading (templated) resource "addons/networking.weave/pre-k8s-1.6.yaml"
I0901 15:06:06.553870 65709 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.553911 65709 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.553968 65709 templates.go:88] loading resource "addons/external-dns.addons.k8s.io/README.md"
I0901 15:06:06.554142 65709 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.554284 65709 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.554469 65709 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.554579 65709 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.554662 65709 templates.go:80] loading (templated) resource "addons/networking.romana/k8s-1.12.yaml"
I0901 15:06:06.554726 65709 templates.go:80] loading (templated) resource "addons/networking.romana/k8s-1.7.yaml"
I0901 15:06:06.554779 65709 templates.go:88] loading resource "addons/storage-gce.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.554807 65709 templates.go:88] loading resource "addons/storage-gce.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.554870 65709 tree_walker.go:98] visit "cloudup/resources"
I0901 15:06:06.554883 65709 tree_walker.go:98] visit "cloudup/resources/addons"
I0901 15:06:06.554910 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws"
I0901 15:06:06.554918 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template"
I0901 15:06:06.555033 65709 loader.go:354] loading (templated) resource "addons/authentication.aws/k8s-1.10.yaml"
I0901 15:06:06.555058 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template"
I0901 15:06:06.555091 65709 loader.go:354] loading (templated) resource "addons/authentication.aws/k8s-1.12.yaml"
I0901 15:06:06.555109 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni"
I0901 15:06:06.555137 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template"
I0901 15:06:06.555186 65709 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml"
I0901 15:06:06.555192 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template"
I0901 15:06:06.555230 65709 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml"
I0901 15:06:06.555237 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template"
I0901 15:06:06.555278 65709 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml"
I0901 15:06:06.555282 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template"
I0901 15:06:06.555323 65709 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml"
I0901 15:06:06.555328 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml.template"
I0901 15:06:06.555370 65709 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml"
I0901 15:06:06.555376 65709 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io"
I0901 15:06:06.555383 65709 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io/BUILD.bazel"
I0901 15:06:06.555398 65709 loader.go:362] loading resource "addons/openstack.addons.k8s.io/BUILD.bazel"
I0901 15:06:06.555402 65709 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.11.yaml.template"
I0901 15:06:06.555437 65709 loader.go:354] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.11.yaml"
I0901 15:06:06.555442 65709 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template"
I0901 15:06:06.555483 65709 loader.go:354] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.13.yaml"
I0901 15:06:06.555489 65709 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io"
I0901 15:06:06.555496 65709 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template"
I0901 15:06:06.555538 65709 loader.go:354] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml"
I0901 15:06:06.555543 65709 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template"
I0901 15:06:06.555583 65709 loader.go:354] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml"
I0901 15:06:06.555589 65709 tree_walker.go:98] visit "cloudup/resources/addons/kops-controller.addons.k8s.io"
I0901 15:06:06.555595 65709 tree_walker.go:98] visit "cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template"
I0901 15:06:06.555645 65709 loader.go:354] loading (templated) resource "addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
I0901 15:06:06.555649 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel"
I0901 15:06:06.555657 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template"
I0901 15:06:06.555728 65709 loader.go:354] loading (templated) resource "addons/networking.flannel/k8s-1.12.yaml"
I0901 15:06:06.555733 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template"
I0901 15:06:06.555802 65709 loader.go:354] loading (templated) resource "addons/networking.flannel/k8s-1.6.yaml"
I0901 15:06:06.555806 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template"
I0901 15:06:06.555844 65709 loader.go:354] loading (templated) resource "addons/networking.flannel/pre-k8s-1.6.yaml"
I0901 15:06:06.555848 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org"
I0901 15:06:06.555857 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template"
I0901 15:06:06.556072 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.12.yaml"
I0901 15:06:06.556105 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template"
I0901 15:06:06.556291 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.16.yaml"
I0901 15:06:06.556306 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template"
I0901 15:06:06.556449 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.6.yaml"
I0901 15:06:06.556456 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template"
I0901 15:06:06.556601 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7-v3.yaml"
I0901 15:06:06.556609 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template"
I0901 15:06:06.556745 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7.yaml"
I0901 15:06:06.556751 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template"
I0901 15:06:06.556842 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/pre-k8s-1.6.yaml"
I0901 15:06:06.556850 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io"
I0901 15:06:06.556859 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml"
I0901 15:06:06.556897 65709 loader.go:362] loading resource "addons/networking.kope.io/k8s-1.12.yaml"
I0901 15:06:06.556901 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml"
I0901 15:06:06.556939 65709 loader.go:362] loading resource "addons/networking.kope.io/k8s-1.6.yaml"
I0901 15:06:06.556944 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml"
I0901 15:06:06.556982 65709 loader.go:362] loading resource "addons/networking.kope.io/pre-k8s-1.6.yaml"
I0901 15:06:06.556987 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave"
I0901 15:06:06.556995 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.8.yaml.template"
I0901 15:06:06.557046 65709 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.8.yaml"
I0901 15:06:06.557053 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template"
I0901 15:06:06.557102 65709 loader.go:354] loading (templated) resource "addons/networking.weave/pre-k8s-1.6.yaml"
I0901 15:06:06.557108 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template"
I0901 15:06:06.557178 65709 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.12.yaml"
I0901 15:06:06.557187 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template"
I0901 15:06:06.557229 65709 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.6.yaml"
I0901 15:06:06.557235 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template"
I0901 15:06:06.557288 65709 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.7.yaml"
I0901 15:06:06.557296 65709 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io"
I0901 15:06:06.557305 65709 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template"
I0901 15:06:06.557337 65709 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.557344 65709 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template"
I0901 15:06:06.557376 65709 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.557381 65709 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/README.md"
I0901 15:06:06.557431 65709 loader.go:362] loading resource "addons/external-dns.addons.k8s.io/README.md"
I0901 15:06:06.557436 65709 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.557483 65709 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.557487 65709 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io"
I0901 15:06:06.557494 65709 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.557574 65709 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.557582 65709 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template"
I0901 15:06:06.557659 65709 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.557665 65709 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml.template"
I0901 15:06:06.557733 65709 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.557740 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana"
I0901 15:06:06.557747 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana/k8s-1.7.yaml.template"
I0901 15:06:06.557795 65709 loader.go:354] loading (templated) resource "addons/networking.romana/k8s-1.7.yaml"
I0901 15:06:06.557801 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana/k8s-1.12.yaml.template"
I0901 15:06:06.557859 65709 loader.go:354] loading (templated) resource "addons/networking.romana/k8s-1.12.yaml"
I0901 15:06:06.557867 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io"
I0901 15:06:06.557874 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.557896 65709 loader.go:362] loading resource "addons/storage-gce.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.557900 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.557924 65709 loader.go:362] loading resource "addons/storage-gce.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.557928 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal"
I0901 15:06:06.557964 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template"
I0901 15:06:06.558077 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml"
I0901 15:06:06.558086 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template"
I0901 15:06:06.558215 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.12.yaml"
I0901 15:06:06.558221 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template"
I0901 15:06:06.558364 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.15.yaml"
I0901 15:06:06.558378 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template"
I0901 15:06:06.558529 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.16.yaml"
I0901 15:06:06.558540 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template"
I0901 15:06:06.558621 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.6.yaml"
I0901 15:06:06.558628 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template"
I0901 15:06:06.558738 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.8.yaml"
I0901 15:06:06.558747 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template"
I0901 15:06:06.558848 65709 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.9.yaml"
I0901 15:06:06.558854 65709 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io"
I0901 15:06:06.558863 65709 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template"
I0901 15:06:06.558888 65709 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml"
I0901 15:06:06.558894 65709 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.558919 65709 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.558923 65709 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml.template"
I0901 15:06:06.558953 65709 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml"
I0901 15:06:06.558959 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io"
I0901 15:06:06.558967 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
I0901 15:06:06.558991 65709 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
I0901 15:06:06.558995 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.559014 65709 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.6.0.yaml"
I0901 15:06:06.559019 65709 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.559038 65709 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.559042 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io"
I0901 15:06:06.559049 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml"
I0901 15:06:06.559087 65709 loader.go:362] loading resource "addons/authentication.kope.io/k8s-1.12.yaml"
I0901 15:06:06.559091 65709 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml"
I0901 15:06:06.559130 65709 loader.go:362] loading resource "addons/authentication.kope.io/k8s-1.8.yaml"
I0901 15:06:06.559135 65709 tree_walker.go:98] visit "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io"
I0901 15:06:06.559141 65709 tree_walker.go:98] visit "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template"
I0901 15:06:06.559180 65709 loader.go:354] loading (templated) resource "addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml"
I0901 15:06:06.559188 65709 tree_walker.go:98] visit "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io"
I0901 15:06:06.559194 65709 tree_walker.go:98] visit "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
I0901 15:06:06.559211 65709 loader.go:362] loading resource "addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
I0901 15:06:06.559215 65709 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io"
I0901 15:06:06.559222 65709 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml"
I0901 15:06:06.559272 65709 loader.go:362] loading resource "addons/limit-range.addons.k8s.io/addon.yaml"
I0901 15:06:06.559276 65709 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
I0901 15:06:06.559312 65709 loader.go:362] loading resource "addons/limit-range.addons.k8s.io/v1.5.0.yaml"
I0901 15:06:06.559316 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io"
I0901 15:06:06.559323 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template"
I0901 15:06:06.559489 65709 loader.go:354] loading (templated) resource "addons/networking.cilium.io/k8s-1.12.yaml"
I0901 15:06:06.559495 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template"
I0901 15:06:06.559704 65709 loader.go:354] loading (templated) resource "addons/networking.cilium.io/k8s-1.7.yaml"
I0901 15:06:06.559711 65709 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io"
I0901 15:06:06.559737 65709 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/addon.yaml"
I0901 15:06:06.559756 65709 loader.go:362] loading resource "addons/core.addons.k8s.io/addon.yaml"
I0901 15:06:06.559760 65709 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.559832 65709 loader.go:354] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.559836 65709 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template"
I0901 15:06:06.559887 65709 loader.go:354] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.7.yaml"
I0901 15:06:06.559891 65709 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml"
I0901 15:06:06.559938 65709 loader.go:362] loading resource "addons/core.addons.k8s.io/v1.4.0.yaml"
I0901 15:06:06.559943 65709 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io"
I0901 15:06:06.559950 65709 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template"
I0901 15:06:06.560034 65709 loader.go:354] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml"
I0901 15:06:06.560055 65709 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.560100 65709 loader.go:354] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.560105 65709 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io"
I0901 15:06:06.560112 65709 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.560198 65709 loader.go:354] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.560206 65709 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template"
I0901 15:06:06.560252 65709 loader.go:354] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.560261 65709 tree_walker.go:98] visit "cloudup/resources/addons/rbac.addons.k8s.io"
I0901 15:06:06.560268 65709 tree_walker.go:98] visit "cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml"
I0901 15:06:06.560293 65709 loader.go:362] loading resource "addons/rbac.addons.k8s.io/k8s-1.8.yaml"
I0901 15:06:06.560298 65709 tree_walker.go:98] visit "cloudup/resources/addons/scheduler.addons.k8s.io"
I0901 15:06:06.560304 65709 tree_walker.go:98] visit "cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.560332 65709 loader.go:362] loading resource "addons/scheduler.addons.k8s.io/v1.7.0.yaml"
I0901 15:06:06.560336 65709 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io"
I0901 15:06:06.560344 65709 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template"
I0901 15:06:06.560384 65709 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.6.yaml"
I0901 15:06:06.560389 65709 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template"
I0901 15:06:06.560416 65709 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml"
I0901 15:06:06.560421 65709 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template"
I0901 15:06:06.560453 65709 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
I0901 15:06:06.560458 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter"
I0901 15:06:06.560464 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template"
I0901 15:06:06.560514 65709 loader.go:354] loading (templated) resource "addons/networking.kuberouter/k8s-1.12.yaml"
I0901 15:06:06.560520 65709 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template"
I0901 15:06:06.560561 65709 loader.go:354] loading (templated) resource "addons/networking.kuberouter/k8s-1.6.yaml"
I0901 15:06:06.560845 65709 proxy.go:30] proxies is == nil, returning empty list
I0901 15:06:06.561513 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.561523 65709 images.go:59] Consider image for re-mapping: "kope/kops-controller:1.17.1"
I0901 15:06:06.561533 65709 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000
I0901 15:06:06.563286 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"s3://mybucket.domain.com/mycluster/mycluster.domain.com"}
kind: ConfigMap
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.17.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.17.1
spec:
containers:
- command:
- /usr/bin/kops-controller
- --v=2
- --conf=/etc/kubernetes/kops-controller/config.yaml
image: kope/kops-controller:1.17.1
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/
name: kops-controller-config
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-node-critical
serviceAccount: kops-controller
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
resourceNames:
- kops-controller-leader
resources:
- configmaps
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
I0901 15:06:06.563311 65709 bootstrapchannelbuilder.go:81] hash 93b3f4900cc55fc822600c821f4f17eeae190808
I0901 15:06:06.563417 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
kind: Namespace
metadata:
name: kube-system
I0901 15:06:06.563426 65709 bootstrapchannelbuilder.go:81] hash 3ffe9ac576f9eec72e2bdfbd2ea17d56d9b17b90
I0901 15:06:06.564276 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0"
I0901 15:06:06.564526 65709 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000
I0901 15:06:06.564535 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000
I0901 15:06:06.564539 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000
I0901 15:06:06.564541 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.564546 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/kubedns-amd64:1.9"
I0901 15:06:06.564557 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.564560 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 8080.000000
I0901 15:06:06.564563 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.564565 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.564567 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.564570 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000
I0901 15:06:06.564573 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000
I0901 15:06:06.564575 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000
I0901 15:06:06.564579 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.10"
I0901 15:06:06.564587 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.564590 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.564592 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 8080.000000
I0901 15:06:06.564595 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.564597 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.564600 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000
I0901 15:06:06.564602 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000
I0901 15:06:06.564606 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/dnsmasq-metrics-amd64:1.0"
I0901 15:06:06.564613 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.564616 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.564618 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.564621 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.564623 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.564626 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000
I0901 15:06:06.564630 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/exechealthz-amd64:1.2"
I0901 15:06:06.564636 65709 visitor.go:40] float64 value at spec.template.spec.containers.[3].ports.[0].containerPort: 8080.000000
I0901 15:06:06.565281 65709 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000
I0901 15:06:06.565289 65709 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000
I0901 15:06:06.565419 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
name: kube-dns-autoscaler
namespace: kube-system
spec:
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
labels:
k8s-app: kube-dns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --mode=linear
- --target=Deployment/kube-dns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":2}}
- --logtostderr=true
- --v=2
image: cluster-proportional-autoscaler-amd64:1.0.0
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
name: kube-dns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
labels:
k8s-app: kube-dns
spec:
containers:
- args:
- --domain=cluster.local.
- --dns-port=10053
- --config-map=kube-dns
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
image: kubedns-amd64:1.9
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz-kubedns
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: kubedns
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
- args:
- --cache-size=1000
- --dns-forward-max=150
- --no-resolv
- --server=127.0.0.1#10053
- --log-facility=-
- --min-port=1024
image: k8s-dns-dnsmasq-amd64:1.14.10
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: dnsmasq
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: 150m
memory: 10Mi
- args:
- --v=2
- --logtostderr
image: dnsmasq-metrics-amd64:1.0
livenessProbe:
failureThreshold: 5
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: dnsmasq-metrics
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 10Mi
- args:
- --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
- --url=/healthz-dnsmasq
- --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
- --url=/healthz-kubedns
- --port=8080
- --quiet
image: exechealthz-amd64:1.2
name: healthz
ports:
- containerPort: 8080
protocol: TCP
resources:
limits:
memory: 50Mi
requests:
cpu: 10m
memory: 50Mi
dnsPolicy: Default
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: KubeDNS
name: kube-dns
namespace: kube-system
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
selector:
k8s-app: kube-dns
I0901 15:06:06.565439 65709 bootstrapchannelbuilder.go:81] hash 10164ca81697c39f5e9eb661caa2b64304e80485
I0901 15:06:06.566601 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2"
I0901 15:06:06.566823 65709 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000
I0901 15:06:06.566834 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000
I0901 15:06:06.566838 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000
I0901 15:06:06.566840 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000
I0901 15:06:06.566843 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000
I0901 15:06:06.566846 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000
I0901 15:06:06.566848 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.566851 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10"
I0901 15:06:06.566861 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.566864 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.566867 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.566869 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.566871 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.566881 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000
I0901 15:06:06.566884 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000
I0901 15:06:06.566891 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10"
I0901 15:06:06.566899 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.566901 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.566904 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.566906 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.566909 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.566913 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10"
I0901 15:06:06.566919 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.566922 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.566924 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.566927 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.566929 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.566932 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000
I0901 15:06:06.566935 65709 visitor.go:35] string value at spec.template.spec.volumes.[0].configMap.optional: true
I0901 15:06:06.567547 65709 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000
I0901 15:06:06.567552 65709 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000
I0901 15:06:06.567906 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
name: kube-dns-autoscaler
namespace: kube-system
spec:
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
labels:
k8s-app: kube-dns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: cluster-proportional-autoscaler-amd64:1.1.2-r2
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
serviceAccountName: kube-dns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
name: kube-dns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
template:
metadata:
annotations:
prometheus.io/port: "10055"
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
labels:
k8s-app: kube-dns
spec:
containers:
- args:
- --config-dir=/kube-dns-config
- --dns-port=10053
- --domain=cluster.local.
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
image: k8s-dns-kube-dns-amd64:1.14.10
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: kubedns
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- mountPath: /kube-dns-config
name: kube-dns-config
- args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --dns-forward-max=150
- --no-negcache
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/in6.arpa/127.0.0.1#10053
- --min-port=1024
image: k8s-dns-dnsmasq-nanny-amd64:1.14.10
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: dnsmasq
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- mountPath: /etc/k8s/dns/dnsmasq-nanny
name: kube-dns-config
- args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
image: k8s-dns-sidecar-amd64:1.14.10
livenessProbe:
failureThreshold: 5
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: sidecar
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
cpu: 10m
memory: 20Mi
dnsPolicy: Default
serviceAccountName: kube-dns
volumes:
- configMap:
name: kube-dns
optional: true
name: kube-dns-config
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: KubeDNS
name: kube-dns
namespace: kube-system
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
I0901 15:06:06.567919 65709 bootstrapchannelbuilder.go:81] hash f54490d6367f1948ab34f626a90e7f29d2bbb718
I0901 15:06:06.568935 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0"
I0901 15:06:06.569141 65709 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000
I0901 15:06:06.569149 65709 visitor.go:35] string value at spec.template.spec.volumes.[0].configMap.optional: true
I0901 15:06:06.569153 65709 visitor.go:40] float64 value at spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.[0].weight: 1.000000
I0901 15:06:06.569158 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.569162 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.569164 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.569167 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.569169 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.569173 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000
I0901 15:06:06.569175 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000
I0901 15:06:06.569178 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000
I0901 15:06:06.569181 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000
I0901 15:06:06.569183 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000
I0901 15:06:06.569185 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.569189 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13"
I0901 15:06:06.569202 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.13"
I0901 15:06:06.569210 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.569212 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.569215 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.569217 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.569219 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.569223 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000
I0901 15:06:06.569240 65709 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000
I0901 15:06:06.569244 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000
I0901 15:06:06.569247 65709 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13"
I0901 15:06:06.569254 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000
I0901 15:06:06.569257 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000
I0901 15:06:06.569260 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000
I0901 15:06:06.569262 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000
I0901 15:06:06.569265 65709 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000
I0901 15:06:06.569940 65709 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000
I0901 15:06:06.569945 65709 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000
I0901 15:06:06.570298 65709 visitor.go:40] float64 value at spec.minAvailable: 1.000000
I0901 15:06:06.570356 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
name: kube-dns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: kube-dns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: cluster-proportional-autoscaler-amd64:1.4.0
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
priorityClassName: system-cluster-critical
serviceAccountName: kube-dns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
name: kube-dns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
template:
metadata:
annotations:
prometheus.io/port: "10055"
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- args:
- --config-dir=/kube-dns-config
- --dns-port=10053
- --domain=cluster.local.
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
image: k8s-dns-kube-dns-amd64:1.14.13
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: kubedns
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- mountPath: /kube-dns-config
name: kube-dns-config
- args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --dns-forward-max=150
- --no-negcache
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/in6.arpa/127.0.0.1#10053
- --min-port=1024
image: k8s-dns-dnsmasq-nanny-amd64:1.14.13
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: dnsmasq
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- mountPath: /etc/k8s/dns/dnsmasq-nanny
name: kube-dns-config
- args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
image: k8s-dns-sidecar-amd64:1.14.13
livenessProbe:
failureThreshold: 5
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: sidecar
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
cpu: 10m
memory: 20Mi
dnsPolicy: Default
priorityClassName: system-cluster-critical
serviceAccountName: kube-dns
volumes:
- configMap:
name: kube-dns
optional: true
name: kube-dns-config
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: KubeDNS
name: kube-dns
namespace: kube-system
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: kube-dns.addons.k8s.io
name: kube-dns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: kube-dns
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: kube-dns
I0901 15:06:06.570371 65709 bootstrapchannelbuilder.go:81] hash 03ddc2e1ae96da9cf2beb459ef709ac7fb0428ab
I0901 15:06:06.570536 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
k8s-addon: rbac.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: kubelet-cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet
I0901 15:06:06.570542 65709 bootstrapchannelbuilder.go:81] hash 5d53ce7b920cd1e8d65d2306d80a041420711914
I0901 15:06:06.570654 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api
I0901 15:06:06.570661 65709 bootstrapchannelbuilder.go:81] hash e1508d77cb4e527d7a2939babe36dc350dd83745
I0901 15:06:06.570760 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
kind: LimitRange
metadata:
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container
I0901 15:06:06.570765 65709 bootstrapchannelbuilder.go:81] hash 2ea50e23f1a5aa41df3724630ac25173738cc90c
I0901 15:06:06.570817 65709 template_functions.go:252] watch-ingress=false set on dns-controller
I0901 15:06:06.570979 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.570986 65709 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.17.1"
I0901 15:06:06.570993 65709 visitor.go:40] float64 value at spec.replicas: 1.000000
I0901 15:06:06.571188 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value":
"master"}]'
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=domain.com
- --zone=*/*
- -v=2
image: kope/dns-controller:1.17.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
kubernetes.io/role: master
I0901 15:06:06.571197 65709 bootstrapchannelbuilder.go:81] hash e4464cf4a77ce9298ee4519dbe9e37e4836bf08b
I0901 15:06:06.571300 65709 template_functions.go:252] watch-ingress=false set on dns-controller
I0901 15:06:06.571656 65709 visitor.go:40] float64 value at spec.replicas: 1.000000
I0901 15:06:06.571666 65709 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.17.1"
I0901 15:06:06.571673 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.572193 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value":
"master"}]'
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=domain.com
- --zone=*/*
- -v=2
image: kope/dns-controller:1.17.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller
I0901 15:06:06.572202 65709 bootstrapchannelbuilder.go:81] hash f817d40d42fcf700e1907622f01d568056a8d0b7
I0901 15:06:06.572307 65709 template_functions.go:252] watch-ingress=false set on dns-controller
I0901 15:06:06.572659 65709 visitor.go:40] float64 value at spec.replicas: 1.000000
I0901 15:06:06.572667 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.572674 65709 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.17.1"
I0901 15:06:06.573200 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=domain.com
- --zone=*/*
- -v=2
image: kope/dns-controller:1.17.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller
I0901 15:06:06.573208 65709 bootstrapchannelbuilder.go:81] hash 19e3db26dd76df210d43bff46832dbe47402af62
I0901 15:06:06.573547 65709 visitor.go:35] string value at allowVolumeExpansion: true
I0901 15:06:06.574412 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "false"
labels:
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system
I0901 15:06:06.574429 65709 bootstrapchannelbuilder.go:81] hash 00cf6e46e25b736b2da93c6025ce482474d83904
I0901 15:06:06.574928 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
I0901 15:06:06.574945 65709 bootstrapchannelbuilder.go:81] hash 62705a596142e6cc283280e8aa973e51536994c5
I0901 15:06:06.575151 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
labels:
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
I0901 15:06:06.575162 65709 bootstrapchannelbuilder.go:81] hash 7de4b2eb0521d669172038759c521418711d8266
I0901 15:06:06.575920 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.575926 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.575936 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/node:v2.4.1"
I0901 15:06:06.575955 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.575961 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/cni:v1.10.0"
I0901 15:06:06.575970 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.9.1"
I0901 15:06:06.575977 65709 visitor.go:35] string value at spec.template.spec.containers.[2].securityContext.privileged: true
I0901 15:06:06.575984 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.576581 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
kind: ConfigMap
metadata:
name: canal-config
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key": "CriticalAddonsOnly", "operator": "Exists"}]
labels:
k8s-app: canal
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: FELIX_LOGSEVERITYSYS
value: INFO
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: kops,canal
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
image: calico/node:v2.4.1
name: calico-node
resources:
requests:
cpu: 100m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- command:
- /install-cni.sh
env:
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: calico/cni:v1.10.0
name: install-cni
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.9.1
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run
name: run
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
- hostPath:
path: /run
name: run
- configMap:
name: canal-config
name: flannel-cfg
I0901 15:06:06.576591 65709 bootstrapchannelbuilder.go:81] hash c11bf79aacfbcf10501262f6d2d00de9e3ef9c81
I0901 15:06:06.577653 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.577658 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.577661 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.577670 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/node:v2.4.1"
I0901 15:06:06.577689 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/cni:v1.10.0"
I0901 15:06:06.577700 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.9.0"
I0901 15:06:06.577707 65709 visitor.go:35] string value at spec.template.spec.containers.[2].securityContext.privileged: true
I0901 15:06:06.577711 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.578839 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
kind: ConfigMap
metadata:
name: canal-config
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: canal
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: FELIX_LOGSEVERITYSYS
value: INFO
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: kops,canal
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
image: calico/node:v2.4.1
name: calico-node
resources:
requests:
cpu: 100m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- command:
- /install-cni.sh
env:
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: calico/cni:v1.10.0
name: install-cni
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.9.0
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run
name: run
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
serviceAccountName: canal
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
- hostPath:
path: /run
name: run
- configMap:
name: canal-config
name: flannel-cfg
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- projectcalico.org
resources:
- globalbgppeers
verbs:
- get
- list
- apiGroups:
- projectcalico.org
resources:
- globalconfigs
- globalbgpconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- projectcalico.org
resources:
- ippools
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- alpha.projectcalico.org
resources:
- systemnetworkpolicies
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
I0901 15:06:06.578861 65709 bootstrapchannelbuilder.go:81] hash dfb24be5023efcedf2a8c14ff77f40d0e409033c
I0901 15:06:06.580204 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.580210 65709 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000
I0901 15:06:06.580220 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 9099.000000
I0901 15:06:06.580223 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000
I0901 15:06:06.580226 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.580228 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.580231 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.580240 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/node:v2.6.7"
I0901 15:06:06.580252 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000
I0901 15:06:06.580255 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 9099.000000
I0901 15:06:06.580257 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000
I0901 15:06:06.580259 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000
I0901 15:06:06.580269 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/cni:v1.11.2"
I0901 15:06:06.580281 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.9.0"
I0901 15:06:06.580288 65709 visitor.go:35] string value at spec.template.spec.containers.[2].securityContext.privileged: true
I0901 15:06:06.581696 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
kind: ConfigMap
metadata:
name: canal-config
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: canal
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: FELIX_LOGSEVERITYSYS
value: INFO
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: kops,canal
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: IP
value: ""
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
- name: FELIX_HEALTHENABLED
value: "true"
image: calico/node:v2.6.7
livenessProbe:
failureThreshold: 6
httpGet:
path: /liveness
port: 9099
initialDelaySeconds: 10
periodSeconds: 10
name: calico-node
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
resources:
requests:
cpu: 50m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- command:
- /install-cni.sh
env:
- name: CNI_CONF_NAME
value: 10-calico.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: calico/cni:v1.11.2
name: install-cni
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.9.0
name: kube-flannel
resources:
limits:
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /run
name: run
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
serviceAccountName: canal
terminationGracePeriodSeconds: 0
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
- hostPath:
path: /run
name: run
- configMap:
name: canal-config
name: flannel-cfg
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalfelixconfigs.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalFelixConfig
plural: globalfelixconfigs
singular: globalfelixconfig
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalbgpconfigs.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalBGPConfig
plural: globalbgpconfigs
singular: globalbgpconfig
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
plural: ippools
singular: ippool
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
version: v1
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- bgppeers
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
I0901 15:06:06.581712 65709 bootstrapchannelbuilder.go:81] hash 25d329d1169eeb82b8e44db1cc50e48e32639d95
I0901 15:06:06.583456 65709 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000
I0901 15:06:06.583471 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.583475 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.583478 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false
I0901 15:06:06.583488 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/node:v3.2.3"
I0901 15:06:06.583500 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000
I0901 15:06:06.583504 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 9099.000000
I0901 15:06:06.583507 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000
I0901 15:06:06.583509 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000
I0901 15:06:06.583512 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 9099.000000
I0901 15:06:06.583514 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000
I0901 15:06:06.583517 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.583522 65709 images.go:59] Consider image for re-mapping: "quay.io/calico/cni:v3.2.3"
I0901 15:06:06.583534 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.9.0"
I0901 15:06:06.583541 65709 visitor.go:35] string value at spec.template.spec.containers.[2].securityContext.privileged: true
I0901 15:06:06.583545 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.583548 65709 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000
I0901 15:06:06.586386 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
kind: ConfigMap
metadata:
name: canal-config
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: canal
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: k8s,canal
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: IP
value: ""
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSCREEN
value: INFO
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
- name: FELIX_HEALTHENABLED
value: "true"
image: calico/node:v3.2.3
livenessProbe:
failureThreshold: 6
httpGet:
host: localhost
path: /liveness
port: 9099
initialDelaySeconds: 10
periodSeconds: 10
name: calico-node
readinessProbe:
httpGet:
host: localhost
path: /readiness
port: 9099
periodSeconds: 10
resources:
requests:
cpu: 250m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- command:
- /install-cni.sh
env:
- name: CNI_CONF_NAME
value: 10-canal.conflist
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
image: calico/cni:v3.2.3
name: install-cni
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.9.0
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run
name: run
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: canal
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /var/lib/calico
name: var-lib-calico
- hostPath:
path: /run
name: run
- configMap:
name: canal-config
name: flannel-cfg
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico
rules:
- apiGroups:
- ""
resources:
- namespaces
- serviceaccounts
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- globalnetworksets
- hostendpoints
- bgpconfigurations
- ippools
- globalnetworkpolicies
- networkpolicies
- clusterinformations
verbs:
- create
- get
- list
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
plural: ippools
singular: ippool
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
version: v1
I0901 15:06:06.586430 65709 bootstrapchannelbuilder.go:81] hash e0b3bae219c413737e777ab37bfaad6deb143112
I0901 15:06:06.589227 65709 images.go:59] Consider image for re-mapping: "calico/node:v3.7.5"
I0901 15:06:06.589237 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 9099.000000
I0901 15:06:06.589241 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000
I0901 15:06:06.589243 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000
I0901 15:06:06.589245 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000
I0901 15:06:06.589248 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 9099.000000
I0901 15:06:06.589250 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000
I0901 15:06:06.589253 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.589256 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.589258 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.589261 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false
I0901 15:06:06.589263 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[3].readOnly: false
I0901 15:06:06.589266 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.11.0"
I0901 15:06:06.589283 65709 visitor.go:35] string value at spec.template.spec.containers.[1].securityContext.privileged: true
I0901 15:06:06.589286 65709 visitor.go:35] string value at spec.template.spec.containers.[1].volumeMounts.[0].readOnly: false
I0901 15:06:06.589305 65709 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000
I0901 15:06:06.589327 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.589333 65709 images.go:59] Consider image for re-mapping: "calico/cni:v3.7.5"
I0901 15:06:06.589341 65709 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000
I0901 15:06:06.590163 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"mtu": __CNI_MTU__,
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
typha_service_name: none
veth_mtu: "1500"
kind: ConfigMap
metadata:
name: canal-config
namespace: kube-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
plural: ippools
singular: ippool
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkSet
plural: networksets
singular: networkset
scope: Namespaced
version: v1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: canal
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: USE_POD_CIDR
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: k8s,canal
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: IP
value: ""
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSCREEN
value: INFO
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
- name: FELIX_HEALTHENABLED
value: "true"
image: calico/node:v3.7.5
livenessProbe:
failureThreshold: 6
httpGet:
host: localhost
path: /liveness
port: 9099
initialDelaySeconds: 10
periodSeconds: 10
name: calico-node
readinessProbe:
httpGet:
host: localhost
path: /readiness
port: 9099
periodSeconds: 10
resources:
requests:
cpu: 100m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.11.0
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
initContainers:
- command:
- /install-cni.sh
env:
- name: CNI_CONF_NAME
value: 10-canal.conflist
- name: CNI_MTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: SLEEP
value: "false"
image: calico/cni:v3.7.5
name: install-cni
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
nodeSelector:
beta.kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: canal
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /var/lib/calico
name: var-lib-calico
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- configMap:
name: canal-config
name: flannel-cfg
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
I0901 15:06:06.590200 65709 bootstrapchannelbuilder.go:81] hash 5c7eff3a9427c9bebf9ea53830151e222b30fea8
I0901 15:06:06.594439 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.594448 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.594451 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.594453 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false
I0901 15:06:06.594461 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[3].readOnly: false
I0901 15:06:06.594473 65709 images.go:59] Consider image for re-mapping: "calico/node:v3.12.2"
I0901 15:06:06.594480 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000
I0901 15:06:06.594484 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000
I0901 15:06:06.594486 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000
I0901 15:06:06.594489 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 9099.000000
I0901 15:06:06.594491 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000
I0901 15:06:06.594494 65709 visitor.go:35] string value at spec.template.spec.containers.[1].volumeMounts.[0].readOnly: false
I0901 15:06:06.594501 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.11.0"
I0901 15:06:06.594529 65709 visitor.go:35] string value at spec.template.spec.containers.[1].securityContext.privileged: true
I0901 15:06:06.594532 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.594534 65709 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000
I0901 15:06:06.594546 65709 images.go:59] Consider image for re-mapping: "calico/cni:v3.12.2"
I0901 15:06:06.594550 65709 visitor.go:35] string value at spec.template.spec.initContainers.[0].securityContext.privileged: true
I0901 15:06:06.594553 65709 visitor.go:35] string value at spec.template.spec.initContainers.[1].securityContext.privileged: true
I0901 15:06:06.594556 65709 images.go:59] Consider image for re-mapping: "calico/pod2daemon-flexvol:v3.12.2"
I0901 15:06:06.594560 65709 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000
I0901 15:06:06.595858 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
typha_service_name: none
veth_mtu: "1440"
kind: ConfigMap
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-config
namespace: kube-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamblocks.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: blockaffinities.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamhandles.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamconfigs.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: bgppeers.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: bgpconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
plural: ippools
singular: ippool
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: hostendpoints.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: globalnetworksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: networksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkSet
plural: networksets
singular: networkset
scope: Namespaced
version: v1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
role.kubernetes.io/networking: "1"
name: calico
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
role.kubernetes.io/networking: "1"
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: USE_POD_CIDR
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: k8s,canal
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: IP
value: ""
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSCREEN
value: INFO
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_IPTABLESBACKEND
value: Auto
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
image: calico/node:v3.12.2
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
failureThreshold: 6
initialDelaySeconds: 10
periodSeconds: 10
name: calico-node
readinessProbe:
httpGet:
host: localhost
path: /readiness
port: 9099
periodSeconds: 10
resources:
requests:
cpu: 250m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /var/run/nodeagent
name: policysync
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.11.0
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
initContainers:
- command:
- /install-cni.sh
env:
- name: CNI_CONF_NAME
value: 10-canal.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CNI_MTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: SLEEP
value: "false"
image: calico/cni:v3.12.2
name: install-cni
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- image: calico/pod2daemon-flexvol:v3.12.2
name: flexvol-driver
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/driver
name: flexvol-driver-host
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: canal
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /var/lib/calico
name: var-lib-calico
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- configMap:
name: canal-config
name: flannel-cfg
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
- hostPath:
path: /var/run/nodeagent
type: DirectoryOrCreate
name: policysync
- hostPath:
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
type: DirectoryOrCreate
name: flexvol-driver-host
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal
namespace: kube-system
I0901 15:06:06.595900 65709 bootstrapchannelbuilder.go:81] hash 7f54193ede1ba5b1c5f327e01563ca1bf1d67344
I0901 15:06:06.600721 65709 images.go:59] Consider image for re-mapping: "calico/cni:v3.13.4"
I0901 15:06:06.600732 65709 visitor.go:35] string value at spec.template.spec.initContainers.[0].securityContext.privileged: true
I0901 15:06:06.600736 65709 images.go:59] Consider image for re-mapping: "calico/pod2daemon-flexvol:v3.13.4"
I0901 15:06:06.600741 65709 visitor.go:35] string value at spec.template.spec.initContainers.[1].securityContext.privileged: true
I0901 15:06:06.600745 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 9099.000000
I0901 15:06:06.600749 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000
I0901 15:06:06.600752 65709 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true
I0901 15:06:06.600754 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true
I0901 15:06:06.600757 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false
I0901 15:06:06.600759 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false
I0901 15:06:06.600762 65709 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[3].readOnly: false
I0901 15:06:06.600772 65709 images.go:59] Consider image for re-mapping: "calico/node:v3.13.4"
I0901 15:06:06.600776 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000
I0901 15:06:06.600778 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000
I0901 15:06:06.600781 65709 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000
I0901 15:06:06.600788 65709 images.go:59] Consider image for re-mapping: "quay.io/coreos/flannel:v0.11.0"
I0901 15:06:06.600804 65709 visitor.go:35] string value at spec.template.spec.containers.[1].securityContext.privileged: true
I0901 15:06:06.600808 65709 visitor.go:35] string value at spec.template.spec.containers.[1].volumeMounts.[0].readOnly: false
I0901 15:06:06.600810 65709 visitor.go:35] string value at spec.template.spec.hostNetwork: true
I0901 15:06:06.600812 65709 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000
I0901 15:06:06.600821 65709 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000
I0901 15:06:06.601748 65709 bootstrapchannelbuilder.go:78] Manifest apiVersion: v1
data:
canal_iface: ""
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
masquerade: "true"
net-conf.json: |
{
"Network": "100.64.0.0/10",
"Backend": {
"Type": "vxlan"
}
}
typha_service_name: none
veth_mtu: "1440"
kind: ConfigMap
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-config
namespace: kube-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: bgpconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: bgppeers.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: blockaffinities.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: globalnetworksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: hostendpoints.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamblocks.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamconfigs.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ipamhandles.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
plural: ippools
singular: ippool
scope: Cluster
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
role.kubernetes.io/networking: "1"
name: networksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkSet
plural: networksets
singular: networkset
scope: Namespaced
version: v1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
role.kubernetes.io/networking: "1"
name: calico
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- patch
- apiGroups:
- crd.projectcalico.org
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
role.kubernetes.io/networking: "1"
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
name: canal
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
role.kubernetes.io/networking: "1"
spec:
containers:
- env:
- name: DATASTORE_TYPE
value: kubernetes
- name: USE_POD_CIDR
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
value: none
- name: CLUSTER_TYPE
value: k8s,canal
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
- name: IP
value: ""
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: ACCEPT
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSCREEN
value: info
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_CHAININSERTMODE
value: insert
- name: FELIX_IPTABLESBACKEND
value: Auto
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "true"
image: calico/node:v3.13.4
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
failureThreshold: 6
initialDelaySeconds: 10
periodSeconds: 10
name: calico-node
readinessProbe:
httpGet:
host: localhost
path: /readiness
port: 9099
periodSeconds: 10
resources:
requests:
cpu: 90m
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /var/run/nodeagent
name: policysync
- command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
key: canal_iface
name: canal-config
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
key: masquerade
name: canal-config
image: coreos/flannel:v0.11.0
name: kube-flannel
securityContext:
privileged: true
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /etc/kube-flannel/
name: flannel-cfg
hostNetwork: true
initContainers:
- command:
- /install-cni.sh
env:
- name: CNI_CONF_NAME
value: 10-canal.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
key: cni_network_config
name: canal-config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CNI_MTU
valueFrom:
configMapKeyRef:
key: veth_mtu
name: canal-config
- name: SLEEP
value: "false"
image: calico/cni:v3.13.4
name: install-cni
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- image: calico/pod2daemon-flexvol:v3.13.4
name: flexvol-driver
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/driver
name: flexvol-driver-host
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: canal
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /var/run/calico
name: var-run-calico
- hostPath:
path: /var/lib/calico
name: var-lib-calico
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- configMap:
name: canal-config
name: flannel-cfg
- hostPath:
path: /opt/cni/bin
name: cni-bin-dir
- hostPath:
path: /etc/cni/net.d
name: cni-net-dir
- hostPath:
path: /var/run/nodeagent
type: DirectoryOrCreate
name: policysync
- hostPath:
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
type: DirectoryOrCreate
name: flexvol-driver-host
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
role.kubernetes.io/networking: "1"
name: canal
namespace: kube-system
I0901 15:06:06.601776 65709 bootstrapchannelbuilder.go:81] hash ba17e5fdb4fddd62c35751223476f3e988a62931
I0901 15:06:06.602613 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602621 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602624 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602627 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602629 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602634 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602639 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602642 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602644 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602646 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602649 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602654 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.602658 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602661 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602663 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602666 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602668 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602671 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602674 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602676 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602678 65709 task.go:102] testing task "Secret"
I0901 15:06:06.602687 65709 task.go:102] testing task "MirrorSecrets"
I0901 15:06:06.602693 65709 task.go:102] testing task "MirrorKeystore"
I0901 15:06:06.607597 65709 task.go:102] testing task "ManagedFile"
I0901 15:06:06.607775 65709 build_flags.go:49] ignoring non-field:
I0901 15:06:06.607785 65709 build_flags.go:49] ignoring non-field:
I0901 15:06:06.607816 65709 proxy.go:30] proxies is == nil, returning empty list
I0901 15:06:06.608111 65709 task.go:102] testing task "ManagedFile"
I0901 15:06:06.608127 65709 task.go:102] testing task "ManagedFile"
I0901 15:06:06.608130 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608133 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608135 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608286 65709 build_flags.go:49] ignoring non-field:
I0901 15:06:06.608295 65709 build_flags.go:49] ignoring non-field:
I0901 15:06:06.608315 65709 proxy.go:30] proxies is == nil, returning empty list
I0901 15:06:06.608587 65709 task.go:102] testing task "ManagedFile"
I0901 15:06:06.608595 65709 task.go:102] testing task "ManagedFile"
I0901 15:06:06.608601 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608605 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608608 65709 task.go:102] testing task "Keypair"
I0901 15:06:06.608616 65709 task.go:75] EnsureTask ignoring identical
I0901 15:06:06.608630 65709 task.go:102] testing task "EBSVolume"
I0901 15:06:06.608637 65709 task.go:102] testing task "EBSVolume"
I0901 15:06:06.608653 65709 task.go:102] testing task "LoadBalancer"
I0901 15:06:06.608663 65709 task.go:102] testing task "SecurityGroup"
I0901 15:06:06.608668 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608672 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608675 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608685 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608691 65709 task.go:102] testing task "LoadBalancerAttachment"
I0901 15:06:06.608699 65709 task.go:102] testing task "DNSZone"
I0901 15:06:06.608703 65709 task.go:102] testing task "DNSZone"
I0901 15:06:06.608712 65709 task.go:75] EnsureTask ignoring identical
I0901 15:06:06.608716 65709 task.go:102] testing task "DNSName"
I0901 15:06:06.608725 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608729 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608732 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608734 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608737 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.608739 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.609042 65709 task.go:102] testing task "SecurityGroup"
I0901 15:06:06.609054 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.609060 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.609079 65709 task.go:102] testing task "SecurityGroup"
I0901 15:06:06.609087 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.609092 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.609097 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.610059 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.610068 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.610072 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.610135 65709 task.go:102] testing task "SecurityGroupRule"
I0901 15:06:06.610208 65709 task.go:102] testing task "SSHKey"
I0901 15:06:06.610321 65709 context.go:242] Skipping Name tag for shared resource
I0901 15:06:06.610328 65709 context.go:255] Skipping "KubernetesCluster" tag for shared resource
I0901 15:06:06.610368 65709 network.go:70] Kubernetes version "1.17.11"; skipping EnableDNSHostnames requirement on VPC
I0901 15:06:06.610376 65709 task.go:102] testing task "VPC"
I0901 15:06:06.610383 65709 context.go:242] Skipping Name tag for shared resource
I0901 15:06:06.610400 65709 context.go:255] Skipping "KubernetesCluster" tag for shared resource
I0901 15:06:06.610403 65709 task.go:102] testing task "InternetGateway"
I0901 15:06:06.610410 65709 task.go:102] testing task "RouteTable"
I0901 15:06:06.610415 65709 task.go:102] testing task "Route"
I0901 15:06:06.610424 65709 network.go:202] applying subnet tags
I0901 15:06:06.610428 65709 task.go:102] testing task "Subnet"
I0901 15:06:06.610433 65709 task.go:102] testing task "RouteTableAssociation"
I0901 15:06:06.610442 65709 network.go:202] applying subnet tags
I0901 15:06:06.610446 65709 task.go:102] testing task "Subnet"
I0901 15:06:06.610448 65709 task.go:102] testing task "RouteTableAssociation"
I0901 15:06:06.610453 65709 context.go:242] Skipping Name tag for shared resource
I0901 15:06:06.610457 65709 context.go:255] Skipping "KubernetesCluster" tag for shared resource
I0901 15:06:06.610460 65709 task.go:102] testing task "NatGateway"
I0901 15:06:06.610466 65709 task.go:102] testing task "RouteTable"
I0901 15:06:06.610469 65709 task.go:102] testing task "Route"
I0901 15:06:06.610505 65709 task.go:102] testing task "IAMRole"
I0901 15:06:06.610511 65709 task.go:102] testing task "IAMRolePolicy"
I0901 15:06:06.610516 65709 task.go:102] testing task "IAMInstanceProfile"
I0901 15:06:06.610521 65709 task.go:102] testing task "IAMInstanceProfileRole"
I0901 15:06:06.610589 65709 task.go:102] testing task "IAMRolePolicy"
I0901 15:06:06.610616 65709 task.go:102] testing task "IAMRole"
I0901 15:06:06.610618 65709 task.go:102] testing task "IAMRolePolicy"
I0901 15:06:06.610621 65709 task.go:102] testing task "IAMInstanceProfile"
I0901 15:06:06.610623 65709 task.go:102] testing task "IAMInstanceProfileRole"
I0901 15:06:06.610636 65709 task.go:102] testing task "IAMRolePolicy"
I0901 15:06:06.610675 65709 task.go:102] testing task "IAMRole"
F0901 15:06:06.610679 65709 task.go:59] found duplicate tasks with name "IAMRole/nodes.mycluster.domain.com": *awstasks.IAMRole {"ID":null,"Lifecycle":"Sync","Name":"nodes.mycluster.domain.com","RolePolicyDocument":{"Name":"","Resource":{}},"ExportWithID":"nodes"} and *awstasks.IAMRole {"ID":null,"Lifecycle":"Sync","Name":"nodes.mycluster.domain.com","RolePolicyDocument":{"Name":"","Resource":{}},"ExportWithID":"nodes"}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment