Created May 1, 2018 01:06
Fresh kube-aws cluster deploy: third restart of the cluster-autoscaler (AS) pod, running the modified code from https://github.com/kubernetes-incubator/kube-aws/pull/1268
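The two captures below were taken with a kubectl wrapper. A minimal sketch of the equivalent commands, assuming `kk` is simply an alias for kubectl scoped to kube-system and that the pods carry an app=cluster-autoscaler label (both assumptions, not confirmed by this gist):

# Assumption: kk aliases kubectl in the kube-system namespace.
alias kk='kubectl --namespace kube-system'

# List the autoscaler pods and note restart counts (label selector is an assumption).
kk get pods -l app=cluster-autoscaler

# Dump, or follow, the logs of the pod in question, as captured below.
kk logs cluster-autoscaler-59998c8cbf-9hqwq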
kk logs cluster-autoscaler-59998c8cbf-9hqwq
I0501 01:00:45.176755 1 flags.go:52] FLAG: --address=":8085"
I0501 01:00:45.177259 1 flags.go:52] FLAG: --alsologtostderr="false"
I0501 01:00:45.177275 1 flags.go:52] FLAG: --application-metrics-count-limit="100"
I0501 01:00:45.177280 1 flags.go:52] FLAG: --azure-container-registry-config=""
I0501 01:00:45.177286 1 flags.go:52] FLAG: --balance-similar-node-groups="false"
I0501 01:00:45.177290 1 flags.go:52] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
I0501 01:00:45.177294 1 flags.go:52] FLAG: --cloud-config=""
I0501 01:00:45.177386 1 flags.go:52] FLAG: --cloud-provider="aws"
I0501 01:00:45.177390 1 flags.go:52] FLAG: --cloud-provider-gce-lb-src-cidrs="209.85.204.0/22,130.211.0.0/22,35.191.0.0/16,209.85.152.0/22"
I0501 01:00:45.177451 1 flags.go:52] FLAG: --cluster-name=""
I0501 01:00:45.177463 1 flags.go:52] FLAG: --configmap=""
I0501 01:00:45.177467 1 flags.go:52] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
I0501 01:00:45.177471 1 flags.go:52] FLAG: --containerd="unix:///var/run/containerd.sock"
I0501 01:00:45.177475 1 flags.go:52] FLAG: --cores-total="0:320000"
I0501 01:00:45.177478 1 flags.go:52] FLAG: --docker="unix:///var/run/docker.sock"
I0501 01:00:45.177482 1 flags.go:52] FLAG: --docker-env-metadata-whitelist=""
I0501 01:00:45.177486 1 flags.go:52] FLAG: --docker-only="false"
I0501 01:00:45.177490 1 flags.go:52] FLAG: --docker-root="/var/lib/docker"
I0501 01:00:45.177564 1 flags.go:52] FLAG: --docker-tls="false"
I0501 01:00:45.177576 1 flags.go:52] FLAG: --docker-tls-ca="ca.pem"
I0501 01:00:45.177579 1 flags.go:52] FLAG: --docker-tls-cert="cert.pem"
I0501 01:00:45.177583 1 flags.go:52] FLAG: --docker-tls-key="key.pem"
I0501 01:00:45.177586 1 flags.go:52] FLAG: --enable-load-reader="false"
I0501 01:00:45.177590 1 flags.go:52] FLAG: --estimator="binpacking"
I0501 01:00:45.177593 1 flags.go:52] FLAG: --event-storage-age-limit="default=0"
I0501 01:00:45.177597 1 flags.go:52] FLAG: --event-storage-event-limit="default=0"
I0501 01:00:45.177600 1 flags.go:52] FLAG: --expander="least-waste"
I0501 01:00:45.177604 1 flags.go:52] FLAG: --expendable-pods-priority-cutoff="0"
I0501 01:00:45.177608 1 flags.go:52] FLAG: --gke-api-endpoint=""
I0501 01:00:45.177611 1 flags.go:52] FLAG: --global-housekeeping-interval="1m0s"
I0501 01:00:45.177615 1 flags.go:52] FLAG: --google-json-key=""
I0501 01:00:45.177618 1 flags.go:52] FLAG: --housekeeping-interval="10s"
I0501 01:00:45.177622 1 flags.go:52] FLAG: --httptest.serve=""
I0501 01:00:45.177625 1 flags.go:52] FLAG: --kubeconfig=""
I0501 01:00:45.177628 1 flags.go:52] FLAG: --kubernetes=""
I0501 01:00:45.177632 1 flags.go:52] FLAG: --leader-elect="true"
I0501 01:00:45.177640 1 flags.go:52] FLAG: --leader-elect-lease-duration="15s"
I0501 01:00:45.177645 1 flags.go:52] FLAG: --leader-elect-renew-deadline="10s"
I0501 01:00:45.177648 1 flags.go:52] FLAG: --leader-elect-resource-lock="endpoints"
I0501 01:00:45.177652 1 flags.go:52] FLAG: --leader-elect-retry-period="2s"
I0501 01:00:45.177655 1 flags.go:52] FLAG: --log-backtrace-at=":0"
I0501 01:00:45.177661 1 flags.go:52] FLAG: --log-cadvisor-usage="false"
I0501 01:00:45.177665 1 flags.go:52] FLAG: --log-dir=""
I0501 01:00:45.177669 1 flags.go:52] FLAG: --logtostderr="false"
I0501 01:00:45.177672 1 flags.go:52] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
I0501 01:00:45.177676 1 flags.go:52] FLAG: --max-autoprovisioned-node-group-count="15"
I0501 01:00:45.177680 1 flags.go:52] FLAG: --max-empty-bulk-delete="10"
I0501 01:00:45.177683 1 flags.go:52] FLAG: --max-failing-time="15m0s"
I0501 01:00:45.177687 1 flags.go:52] FLAG: --max-graceful-termination-sec="600"
I0501 01:00:45.177690 1 flags.go:52] FLAG: --max-inactivity="10m0s"
I0501 01:00:45.177693 1 flags.go:52] FLAG: --max-node-provision-time="15m0s"
I0501 01:00:45.177700 1 flags.go:52] FLAG: --max-nodes-total="0"
I0501 01:00:45.177704 1 flags.go:52] FLAG: --max-total-unready-percentage="33"
I0501 01:00:45.177713 1 flags.go:52] FLAG: --memory-total="0:6400000"
I0501 01:00:45.177717 1 flags.go:52] FLAG: --min-replica-count="0"
I0501 01:00:45.177720 1 flags.go:52] FLAG: --namespace="kube-system"
I0501 01:00:45.177724 1 flags.go:52] FLAG: --node-autoprovisioning-enabled="false"
I0501 01:00:45.177727 1 flags.go:52] FLAG: --node-group-auto-discovery="asg:tag=k8s.io/cluster-autoscaler/enabled,kubernetes.io/cluster/opsdev-pr"
I0501 01:00:45.177733 1 flags.go:52] FLAG: --nodes="[]"
I0501 01:00:45.177737 1 flags.go:52] FLAG: --ok-total-unready-count="3"
I0501 01:00:45.177740 1 flags.go:52] FLAG: --scale-down-candidates-pool-min-count="50"
I0501 01:00:45.177744 1 flags.go:52] FLAG: --scale-down-candidates-pool-ratio="0.1"
I0501 01:00:45.177748 1 flags.go:52] FLAG: --scale-down-delay-after-add="10m0s"
I0501 01:00:45.177752 1 flags.go:52] FLAG: --scale-down-delay-after-delete="10s"
I0501 01:00:45.177755 1 flags.go:52] FLAG: --scale-down-delay-after-failure="3m0s"
I0501 01:00:45.177759 1 flags.go:52] FLAG: --scale-down-enabled="true"
I0501 01:00:45.177762 1 flags.go:52] FLAG: --scale-down-non-empty-candidates-count="30"
I0501 01:00:45.177765 1 flags.go:52] FLAG: --scale-down-unneeded-time="10m0s"
I0501 01:00:45.177769 1 flags.go:52] FLAG: --scale-down-unready-time="20m0s"
I0501 01:00:45.177772 1 flags.go:52] FLAG: --scale-down-utilization-threshold="0.5"
I0501 01:00:45.177776 1 flags.go:52] FLAG: --scan-interval="10s"
I0501 01:00:45.177779 1 flags.go:52] FLAG: --skip-nodes-with-local-storage="false"
I0501 01:00:45.177783 1 flags.go:52] FLAG: --skip-nodes-with-system-pods="false"
I0501 01:00:45.177804 1 flags.go:52] FLAG: --stderrthreshold="0"
I0501 01:00:45.177809 1 flags.go:52] FLAG: --storage-driver-buffer-duration="1m0s"
I0501 01:00:45.177812 1 flags.go:52] FLAG: --storage-driver-db="cadvisor"
I0501 01:00:45.177816 1 flags.go:52] FLAG: --storage-driver-host="localhost:8086"
I0501 01:00:45.177819 1 flags.go:52] FLAG: --storage-driver-password="root"
I0501 01:00:45.177823 1 flags.go:52] FLAG: --storage-driver-secure="false"
I0501 01:00:45.177826 1 flags.go:52] FLAG: --storage-driver-table="stats"
I0501 01:00:45.177829 1 flags.go:52] FLAG: --storage-driver-user="root"
I0501 01:00:45.177833 1 flags.go:52] FLAG: --test.bench=""
I0501 01:00:45.177836 1 flags.go:52] FLAG: --test.benchmem="false"
I0501 01:00:45.177839 1 flags.go:52] FLAG: --test.benchtime="1s"
I0501 01:00:45.177843 1 flags.go:52] FLAG: --test.blockprofile=""
I0501 01:00:45.177846 1 flags.go:52] FLAG: --test.blockprofilerate="1"
I0501 01:00:45.177849 1 flags.go:52] FLAG: --test.count="1"
I0501 01:00:45.177853 1 flags.go:52] FLAG: --test.coverprofile=""
I0501 01:00:45.177856 1 flags.go:52] FLAG: --test.cpu=""
I0501 01:00:45.177859 1 flags.go:52] FLAG: --test.cpuprofile=""
I0501 01:00:45.177862 1 flags.go:52] FLAG: --test.memprofile=""
I0501 01:00:45.177866 1 flags.go:52] FLAG: --test.memprofilerate="0"
I0501 01:00:45.177869 1 flags.go:52] FLAG: --test.mutexprofile=""
I0501 01:00:45.177872 1 flags.go:52] FLAG: --test.mutexprofilefraction="1"
I0501 01:00:45.177875 1 flags.go:52] FLAG: --test.outputdir=""
I0501 01:00:45.177878 1 flags.go:52] FLAG: --test.parallel="2"
I0501 01:00:45.177882 1 flags.go:52] FLAG: --test.run=""
I0501 01:00:45.177885 1 flags.go:52] FLAG: --test.short="false"
I0501 01:00:45.177888 1 flags.go:52] FLAG: --test.timeout="0s"
I0501 01:00:45.177891 1 flags.go:52] FLAG: --test.trace=""
I0501 01:00:45.177895 1 flags.go:52] FLAG: --test.v="false"
I0501 01:00:45.177898 1 flags.go:52] FLAG: --v="4"
I0501 01:00:45.177901 1 flags.go:52] FLAG: --version="false"
I0501 01:00:45.177908 1 flags.go:52] FLAG: --vmodule=""
I0501 01:00:45.177915 1 flags.go:52] FLAG: --write-status-configmap="true"
I0501 01:00:45.177924 1 main.go:287] Cluster Autoscaler 1.1.0
I0501 01:00:45.373595 1 leaderelection.go:174] attempting to acquire leader lease...
I0501 01:00:45.383905 1 leaderelection.go:184] successfully acquired lease kube-system/cluster-autoscaler
I0501 01:00:45.384509 1 factory.go:33] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"cluster-autoscaler", UID:"13848483-4cda-11e8-ab55-06e962aa0a20", APIVersion:"v1", ResourceVersion:"1211", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' cluster-autoscaler-59998c8cbf-9hqwq became leader
I0501 01:00:45.385506 1 predicates.go:125] Using predicate PodFitsResources
I0501 01:00:45.385524 1 predicates.go:125] Using predicate GeneralPredicates
I0501 01:00:45.385529 1 predicates.go:125] Using predicate PodToleratesNodeTaints
I0501 01:00:45.385533 1 predicates.go:125] Using predicate NoDiskConflict
I0501 01:00:45.385538 1 predicates.go:125] Using predicate NoVolumeZoneConflict
I0501 01:00:45.385542 1 predicates.go:125] Using predicate MaxAzureDiskVolumeCount
I0501 01:00:45.385547 1 predicates.go:125] Using predicate CheckNodeDiskPressure
I0501 01:00:45.385551 1 predicates.go:125] Using predicate CheckNodeCondition
I0501 01:00:45.385555 1 predicates.go:125] Using predicate CheckNodeMemoryPressure
I0501 01:00:45.385559 1 predicates.go:125] Using predicate MatchInterPodAffinity
I0501 01:00:45.385564 1 predicates.go:125] Using predicate MaxEBSVolumeCount
I0501 01:00:45.385568 1 predicates.go:125] Using predicate MaxGCEPDVolumeCount
I0501 01:00:45.385572 1 predicates.go:125] Using predicate ready
I0501 01:00:45.385576 1 predicates.go:125] Using predicate CheckVolumeBinding
I0501 01:00:45.385769 1 reflector.go:202] Starting reflector *v1beta1.DaemonSet (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:293
I0501 01:00:45.385809 1 reflector.go:240] Listing and watching *v1beta1.DaemonSet from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:293
I0501 01:00:45.386068 1 reflector.go:202] Starting reflector *v1.Node (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386168 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386272 1 reflector.go:202] Starting reflector *v1.ReplicationController (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386282 1 reflector.go:240] Listing and watching *v1.ReplicationController from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386133 1 reflector.go:202] Starting reflector *v1.PersistentVolumeClaim (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386602 1 reflector.go:202] Starting reflector *v1beta1.PodDisruptionBudget (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:266
I0501 01:00:45.386617 1 reflector.go:240] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:266
I0501 01:00:45.386566 1 reflector.go:202] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:149
I0501 01:00:45.386751 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:149
I0501 01:00:45.386575 1 reflector.go:202] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:174
I0501 01:00:45.386922 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:174
I0501 01:00:45.386584 1 reflector.go:202] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:212
I0501 01:00:45.387066 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:212
I0501 01:00:45.386591 1 reflector.go:202] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:239
I0501 01:00:45.387196 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:239
I0501 01:00:45.387168 1 reflector.go:202] Starting reflector *v1beta1.ReplicaSet (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.387318 1 reflector.go:240] Listing and watching *v1beta1.ReplicaSet from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.387474 1 reflector.go:202] Starting reflector *v1.Pod (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.387490 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.387700 1 reflector.go:202] Starting reflector *v1.StorageClass (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.387710 1 reflector.go:240] Listing and watching *v1.StorageClass from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386555 1 reflector.go:202] Starting reflector *v1beta1.StatefulSet (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473140 1 reflector.go:240] Listing and watching *v1beta1.StatefulSet from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473231 1 reflector.go:202] Starting reflector *v1beta1.PodDisruptionBudget (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473251 1 reflector.go:240] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.386604 1 reflector.go:240] Listing and watching *v1.PersistentVolumeClaim from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473048 1 reflector.go:202] Starting reflector *v1.Service (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473879 1 reflector.go:240] Listing and watching *v1.Service from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.473858 1 reflector.go:202] Starting reflector *v1.PersistentVolume (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.573999 1 reflector.go:240] Listing and watching *v1.PersistentVolume from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:00:45.678878 1 request.go:480] Throttling request took 104.65713ms, request: GET:https://10.3.0.1:443/api/v1/persistentvolumes?limit=500&resourceVersion=0
I0501 01:00:46.273024 1 request.go:480] Throttling request took 598.497167ms, request: PUT:https://10.3.0.1:443/api/v1/namespaces/kube-system/configmaps/cluster-autoscaler-status
I0501 01:00:47.487660 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:49.572963 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:51.672929 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:53.679881 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:55.691544 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:57.773136 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:00:59.879946 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:01.978589 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:03.986768 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:06.085512 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:08.093692 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:10.117429 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:12.177745 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:14.272988 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:16.285251 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:18.380724 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:20.572986 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:22.681251 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:24.689061 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:26.772986 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:28.780860 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:30.788491 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:32.978678 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:34.986484 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:36.997438 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:39.077988 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:41.085427 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:43.277151 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:45.373122 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:47.384855 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:49.392257 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:51.480205 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:53.673082 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:55.681509 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:57.784568 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:01:59.878755 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:01.887303 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:03.977806 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:06.072969 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:08.089400 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:10.180209 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:12.284300 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:14.292360 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:16.372941 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:18.385472 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:20.393263 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:22.401611 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:24.409136 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:26.418098 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:28.573082 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:30.673007 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:32.681335 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:34.773093 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:36.781573 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:38.792355 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:40.872957 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:42.880436 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:45.073014 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:47.173176 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:02:47.370218 1 auto_scaling.go:138] Failed to describe ASG tags for keys [k8s.io/cluster-autoscaler/enabled kubernetes.io/cluster/opsdev-pr] : RequestError: send request failed
caused by: Post https://autoscaling.us-west-1.amazonaws.com/: dial tcp: i/o timeout
F0501 01:02:47.370359 1 cloud_provider_builder.go:112] Failed to create AWS cloud provider: Failed to get ASGs: RequestError: send request failed
caused by: Post https://autoscaling.us-west-1.amazonaws.com/: dial tcp: i/o timeout
goroutine 47 [running]:
k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog.stacks(0xc420a93e00, 0xc420108870, 0xec, 0xee)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog/glog.go:766 +0xa7
k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog.(*loggingT).output(0x5618fa0, 0xc400000003, 0xc42021fef0, 0x528d12b, 0x19, 0x70, 0x0)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog/glog.go:717 +0x348
k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog.(*loggingT).printf(0x5618fa0, 0x3, 0x3739198, 0x27, 0xc420b8ad20, 0x1, 0x1)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog/glog.go:655 +0x14f
k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog.Fatalf(0x3739198, 0x27, 0xc420b8ad20, 0x1, 0x1)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/github.com/golang/glog/glog.go:1145 +0x67
k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder.CloudProviderBuilder.Build(0x7ffc9602f85a, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go:112 +0x76a
k8s.io/autoscaler/cluster-autoscaler/core.NewAutoscalingContext(0xa, 0x3fe0000000000000, 0x8bb2c97000, 0x1176592e000, 0x0, 0x4e200, 0x0, 0x186a00000, 0x0, 0x7ffc9602f8db, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/autoscaling_context.go:148 +0x466
k8s.io/autoscaler/cluster-autoscaler/core.NewStaticAutoscaler(0xa, 0x3fe0000000000000, 0x8bb2c97000, 0x1176592e000, 0x0, 0x4e200, 0x0, 0x186a00000, 0x0, 0x7ffc9602f8db, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/static_autoscaler.go:56 +0x14d
k8s.io/autoscaler/cluster-autoscaler/core.(*AutoscalerBuilderImpl).Build(0xc420b0f860, 0x412d18, 0xc420b8b850, 0x412d18, 0x1a0)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/autoscaler_builder.go:71 +0x10e
k8s.io/autoscaler/cluster-autoscaler/core.NewPollingAutoscaler(0x5514020, 0xc420b0f860, 0x78, 0x98, 0xc420b7aa00)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/polling_autoscaler.go:38 +0x35
k8s.io/autoscaler/cluster-autoscaler/core.NewAutoscaler(0xa, 0x3fe0000000000000, 0x8bb2c97000, 0x1176592e000, 0x0, 0x4e200, 0x0, 0x186a00000, 0x0, 0x7ffc9602f8db, ...)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/core/autoscaler.go:64 +0x5f2
main.run(0xc420a31e50)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:247 +0x263
main.main.func2(0xc420b50d80)
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/main.go:345 +0x2a
created by k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection.(*LeaderElector).Run
/gopath/src/k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go:145 +0x97
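The fatal error above is a network timeout reaching the regional AWS Auto Scaling API from the pod, which then aborts cloud-provider construction. A minimal sketch of checks that could narrow it down (running them from a controller node or a debug pod, and the 10-second curl timeout, are assumptions, not part of the original report):

# 1. Is the regional Auto Scaling endpoint reachable at all? The crash was a dial tcp i/o timeout.
curl -sS --max-time 10 -o /dev/null -w '%{http_code}\n' https://autoscaling.us-west-1.amazonaws.com/

# 2. Do the ASGs actually carry the tag keys the autoscaler is discovering on?
aws autoscaling describe-tags --region us-west-1 \
  --filters "Name=key,Values=k8s.io/cluster-autoscaler/enabled,kubernetes.io/cluster/opsdev-pr"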
Waited for the sixth (6th) pod restart and still saw the errors, then killed the pod; the automatically deployed replacement pod's logs look clean (see the pod-replacement sketch after the log below):
kk logs -f cluster-autoscaler-59998c8cbf-76rfm
I0501 01:12:30.779655 1 flags.go:52] FLAG: --address=":8085"
I0501 01:12:30.780073 1 flags.go:52] FLAG: --alsologtostderr="false"
I0501 01:12:30.780087 1 flags.go:52] FLAG: --application-metrics-count-limit="100"
I0501 01:12:30.780092 1 flags.go:52] FLAG: --azure-container-registry-config=""
I0501 01:12:30.780098 1 flags.go:52] FLAG: --balance-similar-node-groups="false"
I0501 01:12:30.780102 1 flags.go:52] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
I0501 01:12:30.780106 1 flags.go:52] FLAG: --cloud-config=""
I0501 01:12:30.780110 1 flags.go:52] FLAG: --cloud-provider="aws"
I0501 01:12:30.780113 1 flags.go:52] FLAG: --cloud-provider-gce-lb-src-cidrs="130.211.0.0/22,35.191.0.0/16,209.85.152.0/22,209.85.204.0/22"
I0501 01:12:30.780122 1 flags.go:52] FLAG: --cluster-name=""
I0501 01:12:30.780126 1 flags.go:52] FLAG: --configmap=""
I0501 01:12:30.780130 1 flags.go:52] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
I0501 01:12:30.780134 1 flags.go:52] FLAG: --containerd="unix:///var/run/containerd.sock"
I0501 01:12:30.780144 1 flags.go:52] FLAG: --cores-total="0:320000"
I0501 01:12:30.780148 1 flags.go:52] FLAG: --docker="unix:///var/run/docker.sock"
I0501 01:12:30.780152 1 flags.go:52] FLAG: --docker-env-metadata-whitelist=""
I0501 01:12:30.780155 1 flags.go:52] FLAG: --docker-only="false"
I0501 01:12:30.780159 1 flags.go:52] FLAG: --docker-root="/var/lib/docker"
I0501 01:12:30.780162 1 flags.go:52] FLAG: --docker-tls="false"
I0501 01:12:30.780166 1 flags.go:52] FLAG: --docker-tls-ca="ca.pem"
I0501 01:12:30.780169 1 flags.go:52] FLAG: --docker-tls-cert="cert.pem"
I0501 01:12:30.780173 1 flags.go:52] FLAG: --docker-tls-key="key.pem"
I0501 01:12:30.780176 1 flags.go:52] FLAG: --enable-load-reader="false"
I0501 01:12:30.780180 1 flags.go:52] FLAG: --estimator="binpacking"
I0501 01:12:30.780183 1 flags.go:52] FLAG: --event-storage-age-limit="default=0"
I0501 01:12:30.780187 1 flags.go:52] FLAG: --event-storage-event-limit="default=0"
I0501 01:12:30.780190 1 flags.go:52] FLAG: --expander="least-waste"
I0501 01:12:30.780199 1 flags.go:52] FLAG: --expendable-pods-priority-cutoff="0"
I0501 01:12:30.780202 1 flags.go:52] FLAG: --gke-api-endpoint=""
I0501 01:12:30.780206 1 flags.go:52] FLAG: --global-housekeeping-interval="1m0s"
I0501 01:12:30.780210 1 flags.go:52] FLAG: --google-json-key=""
I0501 01:12:30.780213 1 flags.go:52] FLAG: --housekeeping-interval="10s"
I0501 01:12:30.780217 1 flags.go:52] FLAG: --httptest.serve=""
I0501 01:12:30.780220 1 flags.go:52] FLAG: --kubeconfig=""
I0501 01:12:30.780223 1 flags.go:52] FLAG: --kubernetes=""
I0501 01:12:30.780227 1 flags.go:52] FLAG: --leader-elect="true"
I0501 01:12:30.780236 1 flags.go:52] FLAG: --leader-elect-lease-duration="15s"
I0501 01:12:30.780242 1 flags.go:52] FLAG: --leader-elect-renew-deadline="10s"
I0501 01:12:30.780245 1 flags.go:52] FLAG: --leader-elect-resource-lock="endpoints"
I0501 01:12:30.780249 1 flags.go:52] FLAG: --leader-elect-retry-period="2s"
I0501 01:12:30.780252 1 flags.go:52] FLAG: --log-backtrace-at=":0"
I0501 01:12:30.780264 1 flags.go:52] FLAG: --log-cadvisor-usage="false"
I0501 01:12:30.780270 1 flags.go:52] FLAG: --log-dir=""
I0501 01:12:30.780274 1 flags.go:52] FLAG: --logtostderr="false"
I0501 01:12:30.780277 1 flags.go:52] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
I0501 01:12:30.780282 1 flags.go:52] FLAG: --max-autoprovisioned-node-group-count="15"
I0501 01:12:30.780285 1 flags.go:52] FLAG: --max-empty-bulk-delete="10"
I0501 01:12:30.780289 1 flags.go:52] FLAG: --max-failing-time="15m0s"
I0501 01:12:30.780292 1 flags.go:52] FLAG: --max-graceful-termination-sec="600"
I0501 01:12:30.780296 1 flags.go:52] FLAG: --max-inactivity="10m0s"
I0501 01:12:30.780299 1 flags.go:52] FLAG: --max-node-provision-time="15m0s"
I0501 01:12:30.780305 1 flags.go:52] FLAG: --max-nodes-total="0"
I0501 01:12:30.780310 1 flags.go:52] FLAG: --max-total-unready-percentage="33"
I0501 01:12:30.780315 1 flags.go:52] FLAG: --memory-total="0:6400000"
I0501 01:12:30.780318 1 flags.go:52] FLAG: --min-replica-count="0"
I0501 01:12:30.780321 1 flags.go:52] FLAG: --namespace="kube-system"
I0501 01:12:30.780325 1 flags.go:52] FLAG: --node-autoprovisioning-enabled="false"
I0501 01:12:30.780329 1 flags.go:52] FLAG: --node-group-auto-discovery="asg:tag=k8s.io/cluster-autoscaler/enabled,kubernetes.io/cluster/opsdev-pr"
I0501 01:12:30.780335 1 flags.go:52] FLAG: --nodes="[]"
I0501 01:12:30.780339 1 flags.go:52] FLAG: --ok-total-unready-count="3"
I0501 01:12:30.780343 1 flags.go:52] FLAG: --scale-down-candidates-pool-min-count="50"
I0501 01:12:30.780346 1 flags.go:52] FLAG: --scale-down-candidates-pool-ratio="0.1"
I0501 01:12:30.780355 1 flags.go:52] FLAG: --scale-down-delay-after-add="10m0s"
I0501 01:12:30.780359 1 flags.go:52] FLAG: --scale-down-delay-after-delete="10s"
I0501 01:12:30.780362 1 flags.go:52] FLAG: --scale-down-delay-after-failure="3m0s"
I0501 01:12:30.780366 1 flags.go:52] FLAG: --scale-down-enabled="true"
I0501 01:12:30.780369 1 flags.go:52] FLAG: --scale-down-non-empty-candidates-count="30"
I0501 01:12:30.780372 1 flags.go:52] FLAG: --scale-down-unneeded-time="10m0s"
I0501 01:12:30.780376 1 flags.go:52] FLAG: --scale-down-unready-time="20m0s"
I0501 01:12:30.780380 1 flags.go:52] FLAG: --scale-down-utilization-threshold="0.5"
I0501 01:12:30.780383 1 flags.go:52] FLAG: --scan-interval="10s"
I0501 01:12:30.780387 1 flags.go:52] FLAG: --skip-nodes-with-local-storage="false"
I0501 01:12:30.780390 1 flags.go:52] FLAG: --skip-nodes-with-system-pods="false"
I0501 01:12:30.780393 1 flags.go:52] FLAG: --stderrthreshold="0"
I0501 01:12:30.780397 1 flags.go:52] FLAG: --storage-driver-buffer-duration="1m0s"
I0501 01:12:30.780400 1 flags.go:52] FLAG: --storage-driver-db="cadvisor"
I0501 01:12:30.780404 1 flags.go:52] FLAG: --storage-driver-host="localhost:8086"
I0501 01:12:30.780407 1 flags.go:52] FLAG: --storage-driver-password="root"
I0501 01:12:30.780411 1 flags.go:52] FLAG: --storage-driver-secure="false"
I0501 01:12:30.780414 1 flags.go:52] FLAG: --storage-driver-table="stats"
I0501 01:12:30.780418 1 flags.go:52] FLAG: --storage-driver-user="root"
I0501 01:12:30.780421 1 flags.go:52] FLAG: --test.bench=""
I0501 01:12:30.780424 1 flags.go:52] FLAG: --test.benchmem="false"
I0501 01:12:30.780428 1 flags.go:52] FLAG: --test.benchtime="1s"
I0501 01:12:30.780431 1 flags.go:52] FLAG: --test.blockprofile=""
I0501 01:12:30.780435 1 flags.go:52] FLAG: --test.blockprofilerate="1"
I0501 01:12:30.780438 1 flags.go:52] FLAG: --test.count="1"
I0501 01:12:30.780442 1 flags.go:52] FLAG: --test.coverprofile=""
I0501 01:12:30.780445 1 flags.go:52] FLAG: --test.cpu=""
I0501 01:12:30.780448 1 flags.go:52] FLAG: --test.cpuprofile=""
I0501 01:12:30.780451 1 flags.go:52] FLAG: --test.memprofile=""
I0501 01:12:30.780455 1 flags.go:52] FLAG: --test.memprofilerate="0"
I0501 01:12:30.780458 1 flags.go:52] FLAG: --test.mutexprofile=""
I0501 01:12:30.780461 1 flags.go:52] FLAG: --test.mutexprofilefraction="1"
I0501 01:12:30.780470 1 flags.go:52] FLAG: --test.outputdir=""
I0501 01:12:30.780473 1 flags.go:52] FLAG: --test.parallel="2"
I0501 01:12:30.780477 1 flags.go:52] FLAG: --test.run=""
I0501 01:12:30.780480 1 flags.go:52] FLAG: --test.short="false"
I0501 01:12:30.780484 1 flags.go:52] FLAG: --test.timeout="0s"
I0501 01:12:30.780487 1 flags.go:52] FLAG: --test.trace=""
I0501 01:12:30.780490 1 flags.go:52] FLAG: --test.v="false"
I0501 01:12:30.780493 1 flags.go:52] FLAG: --v="4"
I0501 01:12:30.780497 1 flags.go:52] FLAG: --version="false"
I0501 01:12:30.780508 1 flags.go:52] FLAG: --vmodule=""
I0501 01:12:30.780514 1 flags.go:52] FLAG: --write-status-configmap="true"
I0501 01:12:30.780522 1 main.go:287] Cluster Autoscaler 1.1.0
I0501 01:12:30.887976 1 leaderelection.go:174] attempting to acquire leader lease...
I0501 01:12:30.896435 1 leaderelection.go:243] lock is held by cluster-autoscaler-59998c8cbf-9hqwq and has not yet expired
I0501 01:12:30.896460 1 leaderelection.go:180] failed to acquire lease kube-system/cluster-autoscaler
I0501 01:12:34.376150 1 leaderelection.go:243] lock is held by cluster-autoscaler-59998c8cbf-9hqwq and has not yet expired
I0501 01:12:34.376177 1 leaderelection.go:180] failed to acquire lease kube-system/cluster-autoscaler
I0501 01:12:38.672984 1 leaderelection.go:243] lock is held by cluster-autoscaler-59998c8cbf-9hqwq and has not yet expired
I0501 01:12:38.673010 1 leaderelection.go:180] failed to acquire lease kube-system/cluster-autoscaler
I0501 01:12:42.274795 1 leaderelection.go:243] lock is held by cluster-autoscaler-59998c8cbf-9hqwq and has not yet expired
I0501 01:12:42.274820 1 leaderelection.go:180] failed to acquire lease kube-system/cluster-autoscaler
I0501 01:12:45.328539 1 leaderelection.go:243] lock is held by cluster-autoscaler-59998c8cbf-9hqwq and has not yet expired
I0501 01:12:45.328563 1 leaderelection.go:180] failed to acquire lease kube-system/cluster-autoscaler
I0501 01:12:48.355946 1 leaderelection.go:184] successfully acquired lease kube-system/cluster-autoscaler
I0501 01:12:48.374092 1 predicates.go:125] Using predicate PodFitsResources
I0501 01:12:48.374457 1 predicates.go:125] Using predicate GeneralPredicates
I0501 01:12:48.374507 1 predicates.go:125] Using predicate PodToleratesNodeTaints
I0501 01:12:48.374532 1 predicates.go:125] Using predicate CheckVolumeBinding
I0501 01:12:48.374547 1 predicates.go:125] Using predicate MaxAzureDiskVolumeCount
I0501 01:12:48.374560 1 predicates.go:125] Using predicate NoDiskConflict
I0501 01:12:48.374573 1 predicates.go:125] Using predicate CheckNodeDiskPressure
I0501 01:12:48.374587 1 predicates.go:125] Using predicate MaxGCEPDVolumeCount
I0501 01:12:48.374636 1 predicates.go:125] Using predicate MatchInterPodAffinity
I0501 01:12:48.374650 1 predicates.go:125] Using predicate MaxEBSVolumeCount
I0501 01:12:48.374675 1 predicates.go:125] Using predicate ready
I0501 01:12:48.374690 1 predicates.go:125] Using predicate CheckNodeMemoryPressure
I0501 01:12:48.374723 1 predicates.go:125] Using predicate NoVolumeZoneConflict
I0501 01:12:48.374747 1 predicates.go:125] Using predicate CheckNodeCondition
I0501 01:12:48.374971 1 factory.go:33] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"cluster-autoscaler", UID:"13848483-4cda-11e8-ab55-06e962aa0a20", APIVersion:"v1", ResourceVersion:"2266", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' cluster-autoscaler-59998c8cbf-76rfm became leader
I0501 01:12:48.375209 1 reflector.go:202] Starting reflector *v1beta1.DaemonSet (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:293
I0501 01:12:48.375372 1 reflector.go:240] Listing and watching *v1beta1.DaemonSet from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:293
I0501 01:12:48.375663 1 reflector.go:202] Starting reflector *v1beta1.ReplicaSet (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.375759 1 reflector.go:240] Listing and watching *v1beta1.ReplicaSet from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376068 1 reflector.go:202] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:149
I0501 01:12:48.376109 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:149
I0501 01:12:48.376208 1 reflector.go:202] Starting reflector *v1.PersistentVolume (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376226 1 reflector.go:240] Listing and watching *v1.PersistentVolume from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376427 1 reflector.go:202] Starting reflector *v1.Pod (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:174
I0501 01:12:48.376471 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:174
I0501 01:12:48.376836 1 reflector.go:202] Starting reflector *v1.Node (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376883 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376849 1 reflector.go:202] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:212
I0501 01:12:48.377250 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:212
I0501 01:12:48.377447 1 reflector.go:202] Starting reflector *v1.PersistentVolumeClaim (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.377464 1 reflector.go:240] Listing and watching *v1.PersistentVolumeClaim from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.377789 1 reflector.go:202] Starting reflector *v1.ReplicationController (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.377805 1 reflector.go:240] Listing and watching *v1.ReplicationController from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.376869 1 reflector.go:202] Starting reflector *v1beta1.PodDisruptionBudget (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:266
I0501 01:12:48.378117 1 reflector.go:240] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:266
I0501 01:12:48.376861 1 reflector.go:202] Starting reflector *v1.Node (1h0m0s) from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:239
I0501 01:12:48.378411 1 reflector.go:240] Listing and watching *v1.Node from k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:239
I0501 01:12:48.378487 1 reflector.go:202] Starting reflector *v1beta1.StatefulSet (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.378504 1 reflector.go:240] Listing and watching *v1beta1.StatefulSet from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.378204 1 reflector.go:202] Starting reflector *v1.Service (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.378722 1 reflector.go:240] Listing and watching *v1.Service from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.377122 1 reflector.go:202] Starting reflector *v1.StorageClass (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.379057 1 reflector.go:240] Listing and watching *v1.StorageClass from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.379225 1 reflector.go:202] Starting reflector *v1beta1.PodDisruptionBudget (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.379242 1 reflector.go:240] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.379598 1 reflector.go:202] Starting reflector *v1.Pod (0s) from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.379639 1 reflector.go:240] Listing and watching *v1.Pod from k8s.io/autoscaler/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go:86
I0501 01:12:48.682860 1 request.go:480] Throttling request took 302.967176ms, request: GET:https://10.3.0.1:443/api/v1/pods?limit=500&resourceVersion=0
I0501 01:12:49.077404 1 request.go:480] Throttling request took 690.452139ms, request: PUT:https://10.3.0.1:443/api/v1/namespaces/kube-system/configmaps/cluster-autoscaler-status
I0501 01:12:50.363258 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:12:52.375167 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:12:54.383047 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:12:56.477755 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:12:58.486159 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:00.493528 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:02.579527 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:04.586943 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:06.672999 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:08.680739 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:10.689813 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:12.701156 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:14.708931 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:16.777731 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:18.785294 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:20.793157 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:22.883687 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:24.891323 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:27.077699 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:29.086355 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:31.093733 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:33.287254 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:35.296370 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:37.378111 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:39.385737 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:41.480032 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:43.491695 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:45.573120 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:47.580531 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:49.673030 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:51.773100 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:53.883976 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:55.891197 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:57.980535 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:13:59.990069 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:02.073114 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:04.173053 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:06.180588 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:08.280209 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:10.372982 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
I0501 01:14:12.380475 1 leaderelection.go:199] successfully renewed lease kube-system/cluster-autoscaler
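For reference, the manual remediation mentioned above was simply deleting the crash-looping pod so the Deployment scheduled a fresh replacement; a minimal sketch, again assuming kk aliases kubectl in the kube-system namespace:

# Delete the failing pod; the Deployment's ReplicaSet immediately creates a replacement.
kk delete pod cluster-autoscaler-59998c8cbf-9hqwq

# Watch the new pod come up, then follow its logs (as captured above).
kk get pods -w
kk logs -f cluster-autoscaler-59998c8cbf-76rfm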