---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: [""]
    resources: ["events", "endpoints"]
    verbs: ["create", "patch"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["pods/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["endpoints"]
    resourceNames: ["cluster-autoscaler"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["watch", "list", "get", "update"]
  - apiGroups: [""]
    resources:
      - "pods"
      - "services"
      - "replicationcontrollers"
      - "persistentvolumeclaims"
      - "persistentvolumes"
    verbs: ["watch", "list", "get"]
  - apiGroups: ["extensions"]
    resources: ["replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["watch", "list"]
  - apiGroups: ["apps"]
    resources: ["statefulsets", "replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses", "csinodes"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["batch", "extensions"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create"]
  - apiGroups: ["coordination.k8s.io"]
    resourceNames: ["cluster-autoscaler"]
    resources: ["leases"]
    verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "list", "watch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
    verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    app: cluster-autoscaler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler
  template:
    metadata:
      labels:
        app: cluster-autoscaler
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '8085'
    spec:
      serviceAccountName: cluster-autoscaler
      containers:
        - image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0
          name: cluster-autoscaler
          resources:
            limits:
              cpu: 100m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 300Mi
          command:
            - ./cluster-autoscaler
            - --v=2
            - --cloud-provider=linode
            - --cloud-config=/config/cloud-config
          volumeMounts:
            - name: ssl-certs
              mountPath: /etc/ssl/certs/ca-certificates.crt
              readOnly: true
            - name: cloud-config
              mountPath: /config
              readOnly: true
          imagePullPolicy: "Always"
      volumes:
        - name: ssl-certs
          hostPath:
            path: "/etc/ssl/certs/ca-certificates.crt"
        - name: cloud-config
          secret:
            secretName: cluster-autoscaler-cloud-config
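
The Deployment above mounts a Secret named cluster-autoscaler-cloud-config that is not part of this manifest. As a rough sketch (the local ./cloud-config file name is an assumption, and the keys inside it are defined by the Linode provider documentation, not by this gist), the Secret could be created with:

# Sketch only: create the Secret that the Deployment mounts at /config.
# ./cloud-config is a hypothetical local file holding the Linode provider settings
# (API token, cluster details); check the provider docs for its exact format.
kubectl -n kube-system create secret generic cluster-autoscaler-cloud-config \
  --from-file=cloud-config=./cloud-config

With the manifest applied, the resulting pod produced the log below and then crashed: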
I0418 02:28:50.255328 1 flags.go:52] FLAG: --add-dir-header="false"
I0418 02:28:50.255482 1 flags.go:52] FLAG: --address=":8085"
I0418 02:28:50.255525 1 flags.go:52] FLAG: --alsologtostderr="false"
I0418 02:28:50.255543 1 flags.go:52] FLAG: --aws-use-static-instance-list="false"
I0418 02:28:50.255559 1 flags.go:52] FLAG: --balance-similar-node-groups="false"
I0418 02:28:50.255590 1 flags.go:52] FLAG: --balancing-ignore-label="[]"
I0418 02:28:50.255608 1 flags.go:52] FLAG: --cloud-config="/config/cloud-config"
I0418 02:28:50.255624 1 flags.go:52] FLAG: --cloud-provider="linode"
I0418 02:28:50.255638 1 flags.go:52] FLAG: --cloud-provider-gce-l7lb-src-cidrs="130.211.0.0/22,35.191.0.0/16"
I0418 02:28:50.255674 1 flags.go:52] FLAG: --cloud-provider-gce-lb-src-cidrs="130.211.0.0/22,209.85.152.0/22,209.85.204.0/22,35.191.0.0/16"
I0418 02:28:50.255694 1 flags.go:52] FLAG: --cluster-name=""
I0418 02:28:50.255708 1 flags.go:52] FLAG: --clusterapi-cloud-config-authoritative="false"
I0418 02:28:50.255743 1 flags.go:52] FLAG: --cores-total="0:320000"
I0418 02:28:50.255762 1 flags.go:52] FLAG: --estimator="binpacking"
I0418 02:28:50.255777 1 flags.go:52] FLAG: --expander="random"
I0418 02:28:50.255796 1 flags.go:52] FLAG: --expendable-pods-priority-cutoff="-10"
I0418 02:28:50.255831 1 flags.go:52] FLAG: --gpu-total="[]"
I0418 02:28:50.255848 1 flags.go:52] FLAG: --ignore-daemonsets-utilization="false"
I0418 02:28:50.255872 1 flags.go:52] FLAG: --ignore-mirror-pods-utilization="false"
I0418 02:28:50.255908 1 flags.go:52] FLAG: --ignore-taint="[]"
I0418 02:28:50.255925 1 flags.go:52] FLAG: --kubeconfig=""
I0418 02:28:50.255944 1 flags.go:52] FLAG: --kubernetes=""
I0418 02:28:50.255973 1 flags.go:52] FLAG: --leader-elect="true"
I0418 02:28:50.255995 1 flags.go:52] FLAG: --leader-elect-lease-duration="15s"
I0418 02:28:50.256045 1 flags.go:52] FLAG: --leader-elect-renew-deadline="10s"
I0418 02:28:50.256068 1 flags.go:52] FLAG: --leader-elect-resource-lock="leases"
I0418 02:28:50.256090 1 flags.go:52] FLAG: --leader-elect-resource-name=""
I0418 02:28:50.256137 1 flags.go:52] FLAG: --leader-elect-resource-namespace=""
I0418 02:28:50.256155 1 flags.go:52] FLAG: --leader-elect-retry-period="2s"
I0418 02:28:50.256174 1 flags.go:52] FLAG: --log-backtrace-at=":0"
I0418 02:28:50.256195 1 flags.go:52] FLAG: --log-dir=""
I0418 02:28:50.256242 1 flags.go:52] FLAG: --log-file=""
I0418 02:28:50.256261 1 flags.go:52] FLAG: --log-file-max-size="1800"
I0418 02:28:50.256277 1 flags.go:52] FLAG: --logtostderr="true"
I0418 02:28:50.256296 1 flags.go:52] FLAG: --max-autoprovisioned-node-group-count="15"
I0418 02:28:50.256329 1 flags.go:52] FLAG: --max-bulk-soft-taint-count="10"
I0418 02:28:50.256361 1 flags.go:52] FLAG: --max-bulk-soft-taint-time="3s"
I0418 02:28:50.256377 1 flags.go:52] FLAG: --max-empty-bulk-delete="10"
I0418 02:28:50.256404 1 flags.go:52] FLAG: --max-failing-time="15m0s"
I0418 02:28:50.256454 1 flags.go:52] FLAG: --max-graceful-termination-sec="600"
I0418 02:28:50.256476 1 flags.go:52] FLAG: --max-inactivity="10m0s"
I0418 02:28:50.256495 1 flags.go:52] FLAG: --max-node-provision-time="15m0s"
I0418 02:28:50.256511 1 flags.go:52] FLAG: --max-nodes-total="0"
I0418 02:28:50.256563 1 flags.go:52] FLAG: --max-total-unready-percentage="45"
I0418 02:28:50.256598 1 flags.go:52] FLAG: --memory-total="0:6400000"
I0418 02:28:50.256615 1 flags.go:52] FLAG: --min-replica-count="0"
I0418 02:28:50.256633 1 flags.go:52] FLAG: --namespace="kube-system"
I0418 02:28:50.256669 1 flags.go:52] FLAG: --new-pod-scale-up-delay="0s"
I0418 02:28:50.256687 1 flags.go:52] FLAG: --node-autoprovisioning-enabled="false"
I0418 02:28:50.256703 1 flags.go:52] FLAG: --node-deletion-delay-timeout="2m0s"
I0418 02:28:50.256738 1 flags.go:52] FLAG: --node-group-auto-discovery="[]"
I0418 02:28:50.256759 1 flags.go:52] FLAG: --nodes="[]"
I0418 02:28:50.256777 1 flags.go:52] FLAG: --ok-total-unready-count="3"
I0418 02:28:50.256792 1 flags.go:52] FLAG: --one-output="false"
I0418 02:28:50.256835 1 flags.go:52] FLAG: --profiling="false"
I0418 02:28:50.256854 1 flags.go:52] FLAG: --regional="false"
I0418 02:28:50.256873 1 flags.go:52] FLAG: --scale-down-candidates-pool-min-count="50"
I0418 02:28:50.256902 1 flags.go:52] FLAG: --scale-down-candidates-pool-ratio="0.1"
I0418 02:28:50.256941 1 flags.go:52] FLAG: --scale-down-delay-after-add="10m0s"
I0418 02:28:50.256964 1 flags.go:52] FLAG: --scale-down-delay-after-delete="0s"
I0418 02:28:50.256982 1 flags.go:52] FLAG: --scale-down-delay-after-failure="3m0s"
I0418 02:28:50.257017 1 flags.go:52] FLAG: --scale-down-enabled="true"
I0418 02:28:50.257037 1 flags.go:52] FLAG: --scale-down-gpu-utilization-threshold="0.5"
I0418 02:28:50.257054 1 flags.go:52] FLAG: --scale-down-non-empty-candidates-count="30"
I0418 02:28:50.257072 1 flags.go:52] FLAG: --scale-down-unneeded-time="10m0s"
I0418 02:28:50.257107 1 flags.go:52] FLAG: --scale-down-unready-time="20m0s"
I0418 02:28:50.257140 1 flags.go:52] FLAG: --scale-down-utilization-threshold="0.5"
I0418 02:28:50.257157 1 flags.go:52] FLAG: --scale-up-from-zero="true"
I0418 02:28:50.257175 1 flags.go:52] FLAG: --scan-interval="10s"
I0418 02:28:50.257210 1 flags.go:52] FLAG: --skip-headers="false"
I0418 02:28:50.257228 1 flags.go:52] FLAG: --skip-log-headers="false"
I0418 02:28:50.257243 1 flags.go:52] FLAG: --skip-nodes-with-local-storage="true"
I0418 02:28:50.257262 1 flags.go:52] FLAG: --skip-nodes-with-system-pods="true"
I0418 02:28:50.257306 1 flags.go:52] FLAG: --stderrthreshold="2"
I0418 02:28:50.348862 1 flags.go:52] FLAG: --unremovable-node-recheck-timeout="5m0s"
I0418 02:28:50.348989 1 flags.go:52] FLAG: --v="2"
I0418 02:28:50.349030 1 flags.go:52] FLAG: --vmodule=""
I0418 02:28:50.349085 1 flags.go:52] FLAG: --write-status-configmap="true"
I0418 02:28:50.349139 1 main.go:379] Cluster Autoscaler 1.20.0
I0418 02:28:50.375755 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cluster-autoscaler...
I0418 02:28:50.456871 1 leaderelection.go:253] successfully acquired lease kube-system/cluster-autoscaler
I0418 02:28:50.553022 1 reflector.go:219] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:188
I0418 02:28:50.553498 1 reflector.go:219] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:212
I0418 02:28:50.553782 1 reflector.go:219] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:246
I0418 02:28:50.554051 1 reflector.go:219] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:246
I0418 02:28:50.554327 1 reflector.go:219] Starting reflector *v1beta1.PodDisruptionBudget (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:309
I0418 02:28:50.649443 1 reflector.go:219] Starting reflector *v1.Job (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:338
I0418 02:28:50.649697 1 reflector.go:219] Starting reflector *v1.ReplicaSet (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:347
I0418 02:28:50.649904 1 reflector.go:219] Starting reflector *v1.DaemonSet (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:320
I0418 02:28:50.650194 1 reflector.go:219] Starting reflector *v1.ReplicationController (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:329
I0418 02:28:50.651181 1 reflector.go:219] Starting reflector *v1.StatefulSet (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:356
I0418 02:28:51.453647 1 cloud_provider_builder.go:29] Building linode cloud provider.
F0418 02:28:51.453684 1 cloud_provider_builder.go:50] Unknown cloud provider: linode
goroutine 62 [running]:
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.stacks(0xc000128001, 0xc0000aaa00, 0x5b, 0x13d)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:1026 +0xb8
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.(*loggingT).output(0x6169d60, 0xc000000003, 0x0, 0x0, 0xc0005cee70, 0x6063268, 0x19, 0x32, 0x0)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:975 +0x1a3
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.(*loggingT).printf(0x6169d60, 0xc000000003, 0x0, 0x0, 0x0, 0x0, 0x3afddfd, 0x1a, 0xc0007c1cf0, 0x1, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:750 +0x18b
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.Fatalf(...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:1502
k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder.NewCloudProvider(0xa, 0x3fe0000000000000, 0x3fe0000000000000, 0x8bb2c97000, 0x1176592e000, 0x0, 0x4e200, 0x0, 0x186a0000000000, 0x0, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go:50 +0x2d6
k8s.io/autoscaler/cluster-autoscaler/core.initializeDefaultOptions(0xc000707770, 0x0, 0x203000)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/autoscaler.go:101 +0x2fd
k8s.io/autoscaler/cluster-autoscaler/core.NewAutoscaler(0xa, 0x3fe0000000000000, 0x3fe0000000000000, 0x8bb2c97000, 0x1176592e000, 0x0, 0x4e200, 0x0, 0x186a0000000000, 0x0, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/autoscaler.go:65 +0x43
main.buildAutoscaler(0x920e93, 0xc0005cefc0, 0x4214340, 0xc0005e6380)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:325 +0x364
main.run(0xc000095950)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:331 +0x39
main.main.func2(0x41d0280, 0xc00087f000)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:435 +0x2a
created by k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection.(*LeaderElector).Run
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go:207 +0x113
goroutine 1 [select]:
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait.BackoffUntil(0xc000c9fc00, 0x416c0c0, 0xc00089f6e0, 0xc00087f001, 0xc0000bbc80)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:167 +0x13f
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait.JitterUntil(0xc000a7fc00, 0x77359400, 0x0, 0xc00087f001, 0xc0000bbc80)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:133 +0x98
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait.Until(...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:90
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection.(*LeaderElector).renew(0xc0001feea0, 0x41d0280, 0xc00087f040)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go:263 +0x107
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection.(*LeaderElector).Run(0xc0001feea0, 0x41d0280, 0xc00087f000)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go:208 +0x13b
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection.RunOrDie(0x41d02c0, 0xc000114008, 0x4205d00, 0xc00092a8c0, 0x37e11d600, 0x2540be400, 0x77359400, 0xc00098bb00, 0x3c10808, 0x0, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go:222 +0x96
main.main()
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:426 +0x82c
goroutine 18 [chan receive]:
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.(*loggingT).flushDaemon(0x6169d60)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:1169 +0x8b
created by k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2.init.0
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go:417 +0xdd
goroutine 34 [chan receive]:
k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/k8s.io/klog.(*loggingT).flushDaemon(0x6169b80)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/k8s.io/klog/klog.go:1026 +0x8b
created by k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/k8s.io/klog.init.0
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/k8s.io/klog/klog.go:427 +0xd6
goroutine 123 [select]:
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache.(*Reflector).ListAndWatch.func2(0xc0001aeb60, 0xc0000ba6c0, 0xc000dcc180, 0xc0002fab40)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go:373 +0x159
created by k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache.(*Reflector).ListAndWatch
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go:367 +0x2a6
goroutine 35 [select]:
k8s.io/autoscaler/cluster-autoscaler/vendor/go.opencensus.io/stats/view.(*worker).start(0xc0000944b0)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go:154 +0x100
created by k8s.io/autoscaler/cluster-autoscaler/vendor/go.opencensus.io/stats/view.init.0
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go:32 +0x57
goroutine 50 [sleep]:
time.Sleep(0x3b9aca00)
/usr/local/go/src/runtime/time.go:188 +0xba
k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime.(*metricsRecorder).run(0xc0008665a0)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/metrics_recorder.go:88 +0x3f
created by k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime.newMetricsRecorder
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/metrics_recorder.go:60 +0x104
goroutine 55 [IO wait]:
internal/poll.runtime_pollWait(0x7f741f809040, 0x72, 0x0)
/usr/local/go/src/runtime/netpoll.go:203 +0x55
internal/poll.(*pollDesc).wait(0xc000288498, 0x72, 0x0, 0x0, 0x3acee55)
/usr/local/go/src/internal/poll/fd_poll_runtime.go:87 +0x45
internal/poll.(*pollDesc).waitRead(...)
/usr/local/go/src/internal/poll/fd_poll_runtime.go:92
internal/poll.(*FD).Accept(0xc000288480, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)
/usr/local/go/src/internal/poll/fd_unix.go:384 +0x1d4
net.(*netFD).accept(0xc000288480, 0xc0005f4d58, 0xc00020aa80, 0x7f744648d7d0)
/usr/local/go/src/net/fd_unix.go:238 +0x42
net.(*TCPListener).accept(0xc00043e620, 0xc0005f4d98, 0x40cf28, 0x30)
/usr/local/go/src/net/tcpsock_posix.go:139 +0x32
net.(*TCPListener).Accept(0xc00043e620, 0x37611c0, 0xc0005ee5a0, 0x3389b20, 0x6138ed0)
/usr/local/go/src/net/tcpsock.go:261 +0x64
net/http.(*Server).Serve(0xc0005f8000, 0x41caac0, 0xc00043e620, 0x0, 0x0)
/usr/local/go/src/net/http/server.go:2930 +0x25d
net/http.(*Server).ListenAndServe(0xc0005f8000, 0xc0005f8000, 0xd)
/usr/local/go/src/net/http/server.go:2859 +0xb7
net/http.ListenAndServe(...)
/usr/local/go/src/net/http/server.go:3115
main.main.func1(0xc000095950)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:391 +0x21b
created by main.main
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:381 +0x277
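
The fatal "Unknown cloud provider: linode" above is raised by cloud_provider_builder.go, which suggests the binary shipped in k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0 was built without a Linode provider, so the --cloud-provider=linode flag cannot be resolved. A plausible fix, assuming a later release registers the Linode provider (verify against the autoscaler release notes before relying on a specific tag), is to bump the image in the Deployment:

      containers:
        # Assumption: use a release tag whose build includes the linode provider;
        # v1.21.0 is illustrative and not confirmed by this gist.
        - image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.21.0
          name: cluster-autoscaler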