journalctl -u kubelet
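Kubelet journal captured on the control-plane node k8s-master-1 (arm64, per the asm_arm64.s frames below). A time-bounded capture like the excerpt that follows can be reproduced with journalctl's standard flags; the --since/--until values here are illustrative, chosen to match the window shown:

  # dump the kubelet unit for a fixed window, without the pager
  journalctl -u kubelet --no-pager \
    --since "2022-03-25 18:35:00" --until "2022-03-25 18:37:00"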
-- Journal begins at Fri 2022-03-25 13:12:20 CDT, ends at Fri 2022-03-25 22:51:13 CDT. --
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:35:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
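The traceback above is cadvisor (vendored into the kubelet) failing in runc's cgroup v2 path: statHugeTlb cannot open /sys/kernel/mm/hugepages, so GetStats errors on every housekeeping tick, which likely also explains the "RecentStats: unable to find data in memory cache" partial failures further down. On arm64 boards this directory is typically absent because the kernel was built without hugetlb support. A quick check, assuming shell access to the node (/proc/config.gz exists only if the kernel exposes its config):

  ls /sys/kernel/mm/hugepages                # absent here, per the error above
  zgrep -i 'CONFIG_HUGETLB' /proc/config.gz  # look for CONFIG_HUGETLB_PAGE / CONFIG_HUGETLBFS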
Mar 25 18:35:50 k8s-master-1 kubelet[2894775]: I0325 18:35:50.244530 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:35:50 k8s-master-1 kubelet[2894775]: E0325 18:35:50.245402 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:35:51 k8s-master-1 kubelet[2894775]: E0325 18:35:51.169344 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:35:51 k8s-master-1 kubelet[2894775]: E0325 18:35:51.950845 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
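Every "dial tcp 192.168.1.194:6443: connect: connection refused" in this log (lease renewal, node status, pod status, event patches, token requests) points at the same root symptom: nothing is listening on the apiserver port, consistent with kube-apiserver itself sitting in CrashLoopBackOff below. Possible checks from the node (crictl assumes a configured CRI endpoint, e.g. containerd's default socket; the container ID is a placeholder):

  ss -tlnp | grep 6443                 # is anything bound to the apiserver port?
  crictl ps -a | grep kube-apiserver   # last apiserver container attempt and its state
  crictl logs <container-id>           # why the last attempt exited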
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: I0325 18:35:54.284765 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.285454 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.291614 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.336375 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.337347 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.338652 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.340089 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.341005 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:54 k8s-master-1 kubelet[2894775]: E0325 18:35:54.341062 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
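The five consecutive "Error updating node status, will retry" lines followed by "update node status exceeds retry count" match the kubelet's fixed retry budget (nodeStatusUpdateRetry = 5 in the kubelet source); the burst then repeats on the next sync interval. Counting the bursts is a rough gauge of how long the apiserver has been unreachable:

  journalctl -u kubelet --no-pager | grep -c 'exceeds retry count'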
Mar 25 18:35:55 k8s-master-1 kubelet[2894775]: E0325 18:35:55.685829 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:35:55 k8s-master-1 kubelet[2894775]: E0325 18:35:55.690142 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:35:55 k8s-master-1 kubelet[2894775]: E0325 18:35:55.690363 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:35:55 k8s-master-1 kubelet[2894775]: E0325 18:35:55.690487 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:35:55 k8s-master-1 kubelet[2894775]: I0325 18:35:55.690604 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:35:56 k8s-master-1 kubelet[2894775]: E0325 18:35:56.171648 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
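The kubelet keeps reporting NetworkReady=false because no CNI configuration has been loaded. With containerd, the runtime looks for a network config under /etc/cni/net.d; on clusters whose CNI plugin is deployed as pods, that config often appears only after the plugin pod runs, which cannot happen while the apiserver is down, so this error should persist until the control plane recovers. A quick look at the default locations (paths are containerd's defaults and may differ on this node):

  ls -l /etc/cni/net.d/   # CNI network config; empty or missing would explain the error
  ls -l /opt/cni/bin/     # CNI plugin binaries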
Mar 25 18:35:58 k8s-master-1 kubelet[2894775]: I0325 18:35:58.298548 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:35:58 k8s-master-1 kubelet[2894775]: E0325 18:35:58.301323 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:35:58 k8s-master-1 kubelet[2894775]: E0325 18:35:58.952766 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:35:59 k8s-master-1 kubelet[2894775]: I0325 18:35:59.245705 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:59 k8s-master-1 kubelet[2894775]: I0325 18:35:59.247771 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:59 k8s-master-1 kubelet[2894775]: I0325 18:35:59.249888 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:59 k8s-master-1 kubelet[2894775]: I0325 18:35:59.251932 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:35:59 k8s-master-1 kubelet[2894775]: I0325 18:35:59.253918 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:00 k8s-master-1 kubelet[2894775]: I0325 18:36:00.244852 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:36:00 k8s-master-1 kubelet[2894775]: E0325 18:36:00.249269 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:36:01 k8s-master-1 kubelet[2894775]: E0325 18:36:01.174587 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: I0325 18:36:02.245616 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: E0325 18:36:02.247382 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: W0325 18:36:02.374812 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:36:02 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.294113 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.652909 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.654442 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.656091 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.657634 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.659077 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:04 k8s-master-1 kubelet[2894775]: E0325 18:36:04.659188 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: E0325 18:36:05.731314 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: E0325 18:36:05.734647 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: E0325 18:36:05.734823 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: E0325 18:36:05.734917 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: I0325 18:36:05.734998 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:36:05 k8s-master-1 kubelet[2894775]: E0325 18:36:05.955108 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:36:06 k8s-master-1 kubelet[2894775]: E0325 18:36:06.177538 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:36:08 k8s-master-1 kubelet[2894775]: I0325 18:36:08.302467 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b"
Mar 25 18:36:08 k8s-master-1 kubelet[2894775]: E0325 18:36:08.305254 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:36:09 k8s-master-1 kubelet[2894775]: I0325 18:36:09.245267 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:09 k8s-master-1 kubelet[2894775]: I0325 18:36:09.247070 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:09 k8s-master-1 kubelet[2894775]: I0325 18:36:09.249886 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:09 k8s-master-1 kubelet[2894775]: I0325 18:36:09.252490 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:09 k8s-master-1 kubelet[2894775]: I0325 18:36:09.254329 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:11 k8s-master-1 kubelet[2894775]: E0325 18:36:11.180756 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:36:11 k8s-master-1 kubelet[2894775]: I0325 18:36:11.284979 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:36:11 k8s-master-1 kubelet[2894775]: E0325 18:36:11.286693 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:36:12 k8s-master-1 kubelet[2894775]: E0325 18:36:12.957880 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: W0325 18:36:13.122149 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:36:13 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: I0325 18:36:14.245174 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: I0325 18:36:14.245377 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.246989 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.248679 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.298581 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.949600 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.951072 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.952834 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.954702 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.956340 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:36:14 k8s-master-1 kubelet[2894775]: E0325 18:36:14.956457 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:36:15 k8s-master-1 kubelet[2894775]: E0325 18:36:15.783187 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:36:15 k8s-master-1 kubelet[2894775]: E0325 18:36:15.788246 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:36:15 k8s-master-1 kubelet[2894775]: E0325 18:36:15.790352 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:36:15 k8s-master-1 kubelet[2894775]: E0325 18:36:15.790524 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:36:15 k8s-master-1 kubelet[2894775]: I0325 18:36:15.790629 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:36:16 k8s-master-1 kubelet[2894775]: E0325 18:36:16.183325 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: W0325 18:36:19.038317 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: W0325 18:36:19.130464 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: I0325 18:36:19.245491 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: I0325 18:36:19.247325 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: I0325 18:36:19.249135 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: I0325 18:36:19.250944 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: I0325 18:36:19.253438 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:19 k8s-master-1 kubelet[2894775]: E0325 18:36:19.960557 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: E0325 18:36:21.186047 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: I0325 18:36:21.316637 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: E0325 18:36:21.319854 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: W0325 18:36:21.819459 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:21 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: W0325 18:36:23.040840 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: E0325 18:36:23.554901 2894775 projected.go:199] Error preparing data for projected volume kube-api-access-twh7b for pod kube-system/kube-proxy-fgm87: failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:23 k8s-master-1 kubelet[2894775]: E0325 18:36:23.555262 2894775 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b podName:95ab2fd8-d229-475f-b404-59e5ad925195 nodeName:}" failed. No retries permitted until 2022-03-25 18:38:25.555152212 -0500 CDT m=+164671.944841285 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-twh7b" (UniqueName: "kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b") pod "kube-proxy-fgm87" (UID: "95ab2fd8-d229-475f-b404-59e5ad925195") : failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused | |
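
Annotation: the two errors above are a downstream symptom rather than an independent fault. The projected kube-api-access volume is populated by POSTing to the service account's token subresource (the TokenRequest URL visible in the message), and with nothing listening on 192.168.1.194:6443 the mount setup fails and is backed off, here for 2m2s, before the next retry. A quick reachability probe from the node, as a sketch using the endpoint taken from the log:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same TCP endpoint the kubelet is failing to reach in the log above.
	conn, err := net.DialTimeout("tcp", "192.168.1.194:6443", 3*time.Second)
	if err != nil {
		// Expect "connection refused" for as long as the apiserver is down.
		fmt.Println("apiserver unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("apiserver port is accepting connections")
}
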
Mar 25 18:36:24 k8s-master-1 kubelet[2894775]: E0325 18:36:24.301215 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.068914 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.070533 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.072182 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.073700 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.075240 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.075352 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
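
Annotation: the five identical "will retry" lines followed by "exceeds retry count" match the kubelet's fixed retry budget for node status updates (nodeStatusUpdateRetry is 5 in the kubelet releases I have checked). The loop has no backoff between attempts, which is why all five failures land within milliseconds of each other. A sketch of the pattern; the function names are illustrative, not the kubelet's:

package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // matches the five attempts in the log

// tryUpdateNodeStatus stands in for the kubelet call that is failing;
// here it always fails, the way it does while the API server is down.
func tryUpdateNodeStatus(attempt int) error {
	return errors.New("connect: connection refused")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(i); err != nil {
			fmt.Println("Error updating node status, will retry:", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	fmt.Println(updateNodeStatus())
}
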
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: I0325 18:36:25.320248 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.321662 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: I0325 18:36:25.330269 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.332384 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
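
Annotation: all four control-plane workloads (kube-scheduler above, etcd here, kube-apiserver and kube-proxy below) are in the same CrashLoopBackOff state. Each failed restart doubles the kubelet's per-container backoff until it pins at the 5m0s cap quoted in the messages. A sketch of that schedule, assuming the commonly cited kubelet defaults of a 10s initial delay and a 5m cap:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initialBackoff = 10 * time.Second // assumed kubelet default
		maxBackoff     = 5 * time.Minute  // the "back-off 5m0s" cap in the log
	)
	d := initialBackoff
	for i := 1; i <= 8; i++ {
		fmt.Printf("restart %d: wait %s\n", i, d)
		d *= 2
		if d > maxBackoff {
			d = maxBackoff // 10s, 20s, 40s, ... then pinned at 5m0s
		}
	}
}
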
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: W0325 18:36:25.634958 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.635255 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.838223 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache]" | |
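
Annotation: this "Partial failure" wall traces back to the hugetlb problem above. Each cgroup's housekeeping pass aborts before a sample is stored, so cAdvisor's in-memory cache holds no recent datapoints, and every consumer (the summary provider here, the eviction manager a few lines down) sees "RecentStats: unable to find data in memory cache", which in turn leaves the eviction manager without its allocatableMemory.available signal. A toy sketch of the cache-miss behavior; the types and names are invented for illustration:

package main

import (
	"errors"
	"fmt"
	"time"
)

type sample struct {
	at  time.Time
	cpu float64
}

// statsCache stands in for cAdvisor's in-memory stats store: readers only
// see what housekeeping managed to write.
type statsCache map[string][]sample

func (c statsCache) recentStats(container string) (sample, error) {
	s := c[container]
	if len(s) == 0 {
		return sample{}, errors.New("RecentStats: unable to find data in memory cache")
	}
	return s[len(s)-1], nil
}

func main() {
	cache := statsCache{}
	// Housekeeping failed (hugetlb error), so nothing was ever stored for this cgroup.
	if _, err := cache.recentStats("/kubepods.slice"); err != nil {
		fmt.Println(`"/kubepods.slice":`, err)
	}
}
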
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: W0325 18:36:25.841829 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.842130 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.842333 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: E0325 18:36:25.842485 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:36:25 k8s-master-1 kubelet[2894775]: I0325 18:36:25.842595 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: E0325 18:36:26.188574 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
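
Annotation: "cni plugin not initialized" is reported by the container runtime (containerd on this node) when it finds no usable CNI network config. By default containerd loads configs from /etc/cni/net.d and binaries from /opt/cni/bin, and no CNI addon can install its config while the API server stays down, so this message will repeat until the control plane recovers. A sketch of the config check the runtime effectively performs; the paths are containerd's defaults and assumed unchanged on this node:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// containerd's default CNI config directory; adjust if overridden in config.toml.
	entries, err := os.ReadDir("/etc/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		name := e.Name()
		if strings.HasSuffix(name, ".conf") || strings.HasSuffix(name, ".conflist") {
			confs = append(confs, filepath.Join("/etc/cni/net.d", name))
		}
	}
	if len(confs) == 0 {
		fmt.Println("no CNI network config found -> runtime reports NetworkPluginNotReady")
		return
	}
	fmt.Println("CNI configs:", confs)
}
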
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: I0325 18:36:26.245194 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: E0325 18:36:26.248717 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: W0325 18:36:26.525276 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:26 k8s-master-1 kubelet[2894775]: E0325 18:36:26.962944 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
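
Annotation: the kubelet heartbeats by renewing a Lease named after the node in the kube-node-lease namespace (every 10s by default); while the API server is unreachable, the lease controller retries on the fixed 7s interval shown here. Once the apiserver is back, the heartbeat can be confirmed with a client-go lookup, as in this sketch (the kubeconfig path is an assumption based on kubeadm's default layout):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// /etc/kubernetes/kubelet.conf is kubeadm's default kubelet kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	lease, err := cs.CoordinationV1().Leases("kube-node-lease").Get(
		context.Background(), "k8s-master-1", metav1.GetOptions{})
	if err != nil {
		// Produces the same "connection refused" while the apiserver is down.
		fmt.Println("lease lookup failed:", err)
		return
	}
	fmt.Println("last heartbeat:", lease.Spec.RenewTime)
}
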
Mar 25 18:36:29 k8s-master-1 kubelet[2894775]: I0325 18:36:29.245754 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:29 k8s-master-1 kubelet[2894775]: I0325 18:36:29.248384 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:29 k8s-master-1 kubelet[2894775]: I0325 18:36:29.250251 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:29 k8s-master-1 kubelet[2894775]: I0325 18:36:29.252046 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:29 k8s-master-1 kubelet[2894775]: I0325 18:36:29.253921 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:31 k8s-master-1 kubelet[2894775]: E0325 18:36:31.190616 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:33 k8s-master-1 kubelet[2894775]: I0325 18:36:33.313151 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:36:33 k8s-master-1 kubelet[2894775]: E0325 18:36:33.314647 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:36:33 k8s-master-1 kubelet[2894775]: E0325 18:36:33.965147 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: E0325 18:36:34.304291 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: W0325 18:36:34.390300 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: W0325 18:36:34.482500 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:34 k8s-master-1 kubelet[2894775]: E0325 18:36:34.483499 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.451989 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.453454 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.455042 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.456997 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.459046 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.459162 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.884948 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.887628 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.890408 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: E0325 18:36:35.890581 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:36:35 k8s-master-1 kubelet[2894775]: I0325 18:36:35.890704 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:36:36 k8s-master-1 kubelet[2894775]: E0325 18:36:36.192752 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: W0325 18:36:38.835088 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:38 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.247150 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.249070 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.250857 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.252973 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.255311 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.318380 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: E0325 18:36:39.320251 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.321362 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: E0325 18:36:39.323295 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: I0325 18:36:39.380179 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: E0325 18:36:39.382835 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: W0325 18:36:39.567398 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:39 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:40 k8s-master-1 kubelet[2894775]: E0325 18:36:40.967673 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:41 k8s-master-1 kubelet[2894775]: E0325 18:36:41.195176 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:44 k8s-master-1 kubelet[2894775]: E0325 18:36:44.307645 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:8495, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:36:44 k8s-master-1 kubelet[2894775]: E0325 18:36:44.308260 2894775 event.go:221] Unable to write event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16dfc3d2b098c165", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 34, 58, 314240357, time.Local), Count:1, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}' (retry limit exceeded!) | |
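
Annotation: two different event failures appear here. The first (event.go:276) is the patch of the long-lived BackOff event (Count:8495), which will be retried after sleeping; the second (event.go:221) has exhausted the recorder's per-event retry cap, so that event is dropped for good. In the client-go versions I have checked, the event broadcaster attempts each event a bounded number of times (maxTriesPerEvent = 12) with roughly a 10s sleep between attempts. A sketch of that pattern, with the sleep shortened so it runs quickly:

package main

import (
	"errors"
	"fmt"
	"time"
)

const maxTriesPerEvent = 12 // client-go's cap (assumed; check your vendored version)

// recordEvent stands in for the sink write that keeps failing in the log.
func recordEvent(try int) error {
	return errors.New("dial tcp 192.168.1.194:6443: connect: connection refused")
}

func recordToSink() {
	for try := 1; try <= maxTriesPerEvent; try++ {
		if err := recordEvent(try); err != nil {
			if try == maxTriesPerEvent {
				fmt.Println("Unable to write event (retry limit exceeded!)")
				return
			}
			time.Sleep(10 * time.Millisecond) // client-go sleeps ~10s; shortened for the sketch
			continue
		}
		return
	}
}

func main() { recordToSink() }
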
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.602860 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.604425 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.605876 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.607470 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.609088 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.609232 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: W0325 18:36:45.774502 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.929990 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.932383 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.933977 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: E0325 18:36:45.934112 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:36:45 k8s-master-1 kubelet[2894775]: I0325 18:36:45.934201 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:36:46 k8s-master-1 kubelet[2894775]: E0325 18:36:46.197155 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:46 k8s-master-1 kubelet[2894775]: I0325 18:36:46.306865 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:36:46 k8s-master-1 kubelet[2894775]: E0325 18:36:46.308369 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: W0325 18:36:47.159643 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
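
This warning block recurs for every cgroup cadvisor tracks on this node. The top frame, fs2.statHugeTlb (hugetlb.go:35), enumerates supported hugepage sizes from /sys/kernel/mm/hugepages; on a kernel built without hugetlb support (not unusual for stock arm64 single-board kernels, and this node is arm64 per asm_arm64.s) that directory does not exist, so every housekeeping tick fails identically and cadvisor "continues to push stats" without hugetlb data. A minimal probe of the same sysfs path:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Same path fs2.statHugeTlb enumerates; ENOENT here is exactly the
        // error wrapped into the kubelet warning above.
        entries, err := os.ReadDir("/sys/kernel/mm/hugepages")
        if err != nil {
            fmt.Println("kernel exposes no hugepage sizes:", err)
            return
        }
        for _, e := range entries {
            fmt.Println("hugepage size:", e.Name()) // e.g. hugepages-2048kB
        }
    }

If the directory is absent, the cure is a kernel built with hugetlb support (CONFIG_HUGETLB_PAGE and related options); the warning itself is noisy but non-fatal.
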
Mar 25 18:36:47 k8s-master-1 kubelet[2894775]: E0325 18:36:47.970396 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: W0325 18:36:48.958703 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:48 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:49 k8s-master-1 kubelet[2894775]: I0325 18:36:49.245420 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:49 k8s-master-1 kubelet[2894775]: I0325 18:36:49.247166 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:49 k8s-master-1 kubelet[2894775]: I0325 18:36:49.248969 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:49 k8s-master-1 kubelet[2894775]: I0325 18:36:49.250681 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:49 k8s-master-1 kubelet[2894775]: I0325 18:36:49.252447 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:51 k8s-master-1 kubelet[2894775]: E0325 18:36:51.199367 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
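
The "cni plugin not initialized" line repeats on a fixed poll: the kubelet asks the runtime for network status, and containerd reports NetworkReady=false until a CNI config appears in its conf dir. That cannot resolve here, since the pod-network add-on that would install the config needs the (crash-looping) apiserver. A minimal check, assuming the default conf dir /etc/cni/net.d:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // /etc/cni/net.d is the conventional containerd/kubelet CNI conf
        // dir (an assumption; it is configurable).
        entries, err := os.ReadDir("/etc/cni/net.d")
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        if len(entries) == 0 {
            fmt.Println("CNI conf dir empty: no network plugin installed yet")
            return
        }
        for _, e := range entries {
            fmt.Println("found CNI config:", e.Name())
        }
    }
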
Mar 25 18:36:51 k8s-master-1 kubelet[2894775]: I0325 18:36:51.295444 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:36:51 k8s-master-1 kubelet[2894775]: E0325 18:36:51.299283 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:36:52 k8s-master-1 kubelet[2894775]: I0325 18:36:52.245061 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:36:52 k8s-master-1 kubelet[2894775]: E0325 18:36:52.247195 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
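
Every static pod on this control-plane node (etcd, kube-apiserver, kube-scheduler, kube-controller-manager) plus kube-proxy is pinned at the restart back-off ceiling, which is what "back-off 5m0s restarting failed container=..." means. The kubelet's CrashLoopBackOff delay doubles per failed restart up to that 5m cap; the schedule below treats the commonly documented 10s initial delay as an assumption, since the exact constants are kubelet internals:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Assumed initial delay 10s, doubling, capped at 5m (the cap is the
        // "5m0s" visible in the log messages above).
        delay, maxDelay := 10*time.Second, 5*time.Minute
        for attempt := 1; attempt <= 7; attempt++ {
            fmt.Printf("failed restart %d: next retry in %v\n", attempt, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }
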
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: W0325 18:36:53.262420 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:36:53 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:36:54 k8s-master-1 kubelet[2894775]: I0325 18:36:54.244794 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:36:54 k8s-master-1 kubelet[2894775]: E0325 18:36:54.246673 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:36:54 k8s-master-1 kubelet[2894775]: E0325 18:36:54.972401 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.825506 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.827074 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.828655 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.830201 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.831719 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.831891 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
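
Lease renewal (controller.go:144), node-status updates, and pod-status GETs all fail with the same dial tcp 192.168.1.194:6443: connect: connection refused: nothing is listening on the apiserver port because kube-apiserver itself is in CrashLoopBackOff, and after five consecutive retries the kubelet abandons the status update for the cycle ("exceeds retry count"). A minimal reproduction of the probe, using the address taken from the log lines above:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // 192.168.1.194:6443 is this cluster's apiserver endpoint as seen
        // in the log; substitute your own control-plane address.
        conn, err := net.DialTimeout("tcp", "192.168.1.194:6443", 3*time.Second)
        if err != nil {
            fmt.Println("apiserver unreachable:", err) // "connection refused" while it crash-loops
            return
        }
        conn.Close()
        fmt.Println("apiserver port is accepting connections")
    }
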
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.971581 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.974857 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.975045 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: E0325 18:36:55.975146 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:36:55 k8s-master-1 kubelet[2894775]: I0325 18:36:55.975308 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:36:56 k8s-master-1 kubelet[2894775]: E0325 18:36:56.201500 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.245402 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.247040 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.248803 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.251793 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.254056 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: I0325 18:36:59.294035 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:36:59 k8s-master-1 kubelet[2894775]: E0325 18:36:59.295374 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:37:01 k8s-master-1 kubelet[2894775]: E0325 18:37:01.204093 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:01 k8s-master-1 kubelet[2894775]: E0325 18:37:01.975162 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:05 k8s-master-1 kubelet[2894775]: I0325 18:37:05.322018 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:37:05 k8s-master-1 kubelet[2894775]: E0325 18:37:05.324418 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:37:05 k8s-master-1 kubelet[2894775]: E0325 18:37:05.998560 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.001017 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.002750 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.004732 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.006483 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.006609 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.018902 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.022670 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.022902 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.023029 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: I0325 18:37:06.023142 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.206506 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: I0325 18:37:06.244834 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:37:06 k8s-master-1 kubelet[2894775]: E0325 18:37:06.248233 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:37:08 k8s-master-1 kubelet[2894775]: E0325 18:37:08.978228 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.245244 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.245558 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: E0325 18:37:09.246999 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.248732 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.250509 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.252736 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:09 k8s-master-1 kubelet[2894775]: I0325 18:37:09.254460 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:10 k8s-master-1 kubelet[2894775]: I0325 18:37:10.281610 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:37:10 k8s-master-1 kubelet[2894775]: E0325 18:37:10.283142 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:37:11 k8s-master-1 kubelet[2894775]: E0325 18:37:11.209652 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: W0325 18:37:14.063678 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: W0325 18:37:14.554513 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:14 k8s-master-1 kubelet[2894775]: E0325 18:37:14.554802 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:15 k8s-master-1 kubelet[2894775]: E0325 18:37:15.980214 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.014657 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.017152 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.018687 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.020263 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.021758 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.021871 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.060046 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.063015 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.065697 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.065847 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: I0325 18:37:16.065947 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:37:16 k8s-master-1 kubelet[2894775]: E0325 18:37:16.211605 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: W0325 18:37:17.063206 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:37:17 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.245228 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.246962 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.249615 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.251723 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.253895 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: I0325 18:37:19.307677 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: E0325 18:37:19.310151 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: W0325 18:37:19.719049 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:19 k8s-master-1 kubelet[2894775]: E0325 18:37:19.719351 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:20 k8s-master-1 kubelet[2894775]: I0325 18:37:20.244802 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:37:20 k8s-master-1 kubelet[2894775]: E0325 18:37:20.248344 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:37:21 k8s-master-1 kubelet[2894775]: E0325 18:37:21.214261 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:22 k8s-master-1 kubelet[2894775]: E0325 18:37:22.982623 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:37:23 k8s-master-1 kubelet[2894775]: I0325 18:37:23.245900 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:37:23 k8s-master-1 kubelet[2894775]: E0325 18:37:23.252040 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:37:23 k8s-master-1 kubelet[2894775]: I0325 18:37:23.312732 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:37:23 k8s-master-1 kubelet[2894775]: E0325 18:37:23.313602 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.080792 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.081123 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: W0325 18:37:26.081926 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.082063 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.082151 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: I0325 18:37:26.082211 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.215780 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: I0325 18:37:26.378968 2894775 scope.go:110] "RemoveContainer" containerID="dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.415252 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.416461 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.417263 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.418054 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.418877 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:26 k8s-master-1 kubelet[2894775]: E0325 18:37:26.418933 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
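[Annotation] Five consecutive retries followed by "update node status exceeds retry count" is one abandoned status-update cycle. The failure mode matters: the TCP connection to 192.168.1.194:6443 is refused outright, meaning nothing is listening on the port (the kube-apiserver pod itself is crash-looping below), not that the address is unreachable. A quick Go check against the same endpoint taken from these log lines:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Endpoint copied from the log above. "connection refused" means the
        // port is closed (apiserver not running), not a routing/firewall issue.
        conn, err := net.DialTimeout("tcp", "192.168.1.194:6443", 2*time.Second)
        if err != nil {
            fmt.Println("apiserver down:", err)
            return
        }
        defer conn.Close()
        fmt.Println("apiserver port accepting connections")
    }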
Mar 25 18:37:27 k8s-master-1 kubelet[2894775]: I0325 18:37:27.198816 2894775 scope.go:110] "RemoveContainer" containerID="dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4"
Mar 25 18:37:27 k8s-master-1 kubelet[2894775]: E0325 18:37:27.331996 2894775 remote_runtime.go:505] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to set removing state for container \"dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4\": container is already in removing state" containerID="dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4"
Mar 25 18:37:27 k8s-master-1 kubelet[2894775]: E0325 18:37:27.332096 2894775 kuberuntime_gc.go:146] "Failed to remove container" err="rpc error: code = Unknown desc = failed to set removing state for container \"dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4\": container is already in removing state" containerID="dd09cab093a36b9d52119f316977d3002230983aeff2f6db8b32ffe04edcedd4"
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: I0325 18:37:28.392638 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="d479a7723b37197dba97dd4b4f39699e7b164882ecaf85286948955a387b48e9"
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: I0325 18:37:28.394704 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: E0325 18:37:28.665747 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: W0325 18:37:28.691028 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:28 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.244327 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.245501 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.246589 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.247770 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.248799 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.401222 2894775 scope.go:110] "RemoveContainer" containerID="c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: E0325 18:37:29.402747 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: I0325 18:37:29.402972 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: W0325 18:37:29.437950 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:29 k8s-master-1 kubelet[2894775]: E0325 18:37:29.983696 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:30 k8s-master-1 kubelet[2894775]: I0325 18:37:30.404333 2894775 scope.go:110] "RemoveContainer" containerID="c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26"
Mar 25 18:37:30 k8s-master-1 kubelet[2894775]: E0325 18:37:30.405574 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:37:31 k8s-master-1 kubelet[2894775]: E0325 18:37:31.216814 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
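[Annotation] The recurring "cni plugin not initialized" keeps the node NotReady independently of the apiserver outage: the container runtime reports NetworkReady=false until a CNI network config exists in its conf dir, typically written by a pod network add-on. A small Go sketch of that check, assuming the conventional default directory /etc/cni/net.d (the path is not in the log):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // The CRI runtime stays NetworkReady=false until a network config
        // appears here. Directory is the conventional default, assumed.
        matches, _ := filepath.Glob("/etc/cni/net.d/*")
        if len(matches) == 0 {
            fmt.Println("no CNI config: \"cni plugin not initialized\" will persist")
            return
        }
        for _, m := range matches {
            fmt.Println("found CNI config:", m)
        }
    }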
Mar 25 18:37:31 k8s-master-1 kubelet[2894775]: I0325 18:37:31.407229 2894775 scope.go:110] "RemoveContainer" containerID="c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26"
Mar 25 18:37:31 k8s-master-1 kubelet[2894775]: E0325 18:37:31.409125 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: I0325 18:37:33.244639 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: E0325 18:37:33.246071 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: W0325 18:37:33.434648 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:33 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: W0325 18:37:34.928942 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:34 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: I0325 18:37:35.245794 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: E0325 18:37:35.248407 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
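[Annotation] The differing delays in these messages ("back-off 10s" for kube-controller-manager, "back-off 5m0s" for etcd, kube-apiserver and kube-scheduler) reflect the kubelet's restart back-off: it starts at 10s, doubles on each crash, and caps at 5m0s, so a recently restarted pod shows a short delay while pods that have been crashing for a while sit at the cap. A sketch of that schedule (the 10s base and 5m cap are standard kubelet behavior, inferred rather than read from this log):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Start at the kubelet's initial 10s delay and double per crash,
        // capping at 5m0s -- the two values visible in the log above.
        delay, maxDelay := 10*time.Second, 5*time.Minute
        for crash := 1; crash <= 7; crash++ {
            fmt.Printf("crash %d: back-off %s\n", crash, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }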
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: I0325 18:37:35.310200 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b"
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: W0325 18:37:35.792764 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:35 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: W0325 18:37:36.186958 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.217986 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.226274 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.228057 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.229442 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.229551 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: I0325 18:37:36.229605 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: I0325 18:37:36.428909 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.578557 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.579363 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.580456 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.581248 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.581979 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.582032 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:37:36 k8s-master-1 kubelet[2894775]: E0325 18:37:36.984978 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:37 k8s-master-1 kubelet[2894775]: I0325 18:37:37.306586 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:37:37 k8s-master-1 kubelet[2894775]: E0325 18:37:37.308719 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: W0325 18:37:38.969944 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:38 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:39 k8s-master-1 kubelet[2894775]: I0325 18:37:39.244106 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:39 k8s-master-1 kubelet[2894775]: I0325 18:37:39.245233 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:39 k8s-master-1 kubelet[2894775]: I0325 18:37:39.246284 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:39 k8s-master-1 kubelet[2894775]: I0325 18:37:39.247305 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:39 k8s-master-1 kubelet[2894775]: I0325 18:37:39.248336 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: E0325 18:37:41.220755 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: W0325 18:37:41.984893 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:41 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:43 k8s-master-1 kubelet[2894775]: E0325 18:37:43.987226 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
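[Annotation] The lease this controller keeps retrying every 7s is the kubelet's heartbeat object: a coordination.k8s.io/v1 Lease named after the node in the kube-node-lease namespace, visible in the URL above. With 6443 refusing connections it can be neither fetched nor renewed. A client-go sketch that reads the same object, assuming kubeadm's usual kubelet kubeconfig path (/etc/kubernetes/kubelet.conf) and a reasonably recent client-go:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Kubeconfig path is kubeadm's usual kubelet credential file; adjust
        // if the node was set up differently (assumed, not read from the log).
        cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf")
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // The same GET the kubelet's lease controller keeps failing at.
        lease, err := client.CoordinationV1().Leases("kube-node-lease").Get(
            context.TODO(), "k8s-master-1", metav1.GetOptions{})
        if err != nil {
            fmt.Println("lease unavailable (expected while 6443 is down):", err)
            return
        }
        fmt.Println("lease last renewed:", lease.Spec.RenewTime)
    }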
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: I0325 18:37:45.245942 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: E0325 18:37:45.249829 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: I0325 18:37:45.249939 2894775 scope.go:110] "RemoveContainer" containerID="c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26" | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: W0325 18:37:45.861396 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:37:45 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.225661 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.252699 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.253838 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.254844 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.254939 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: I0325 18:37:46.254991 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: I0325 18:37:46.478158 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.622927    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.623983    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.625017    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.626061    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.627016    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:46 k8s-master-1 kubelet[2894775]: E0325 18:37:46.627077    2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: W0325 18:37:47.770947    2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: I0325 18:37:49.244566    2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: I0325 18:37:49.245275    2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: I0325 18:37:49.245900    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: I0325 18:37:49.246490    2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: I0325 18:37:49.247077    2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: W0325 18:37:49.649367    2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:50 k8s-master-1 kubelet[2894775]: I0325 18:37:50.243904    2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:37:50 k8s-master-1 kubelet[2894775]: E0325 18:37:50.245531    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:37:50 k8s-master-1 kubelet[2894775]: E0325 18:37:50.989750    2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: E0325 18:37:51.228693    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: W0325 18:37:51.437762    2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:51 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:52 k8s-master-1 kubelet[2894775]: W0325 18:37:52.943787    2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:52 k8s-master-1 kubelet[2894775]: E0325 18:37:52.944153    2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:53 k8s-master-1 kubelet[2894775]: I0325 18:37:53.245474    2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:37:53 k8s-master-1 kubelet[2894775]: E0325 18:37:53.247243    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.231389    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.301718    2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.304614    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.306411    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.306564    2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: I0325 18:37:56.306664    2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.797284    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.799113    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.801048    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.803056    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.804777    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:56 k8s-master-1 kubelet[2894775]: E0325 18:37:56.804903    2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: W0325 18:37:57.895149    2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:37:57 k8s-master-1 kubelet[2894775]: E0325 18:37:57.991389    2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:37:58 k8s-master-1 kubelet[2894775]: I0325 18:37:58.245460    2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:37:58 k8s-master-1 kubelet[2894775]: E0325 18:37:58.249241    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:37:58 k8s-master-1 kubelet[2894775]: I0325 18:37:58.445695    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:58 k8s-master-1 kubelet[2894775]: I0325 18:37:58.447467    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:59 k8s-master-1 kubelet[2894775]: I0325 18:37:59.245226    2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:59 k8s-master-1 kubelet[2894775]: I0325 18:37:59.247039    2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:59 k8s-master-1 kubelet[2894775]: I0325 18:37:59.249253    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:59 k8s-master-1 kubelet[2894775]: I0325 18:37:59.251230    2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:37:59 k8s-master-1 kubelet[2894775]: I0325 18:37:59.253185    2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:01 k8s-master-1 kubelet[2894775]: E0325 18:38:01.234068    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:38:02 k8s-master-1 kubelet[2894775]: I0325 18:38:02.245376    2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:38:02 k8s-master-1 kubelet[2894775]: E0325 18:38:02.247659    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: I0325 18:38:04.245462    2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: E0325 18:38:04.247174    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: W0325 18:38:04.441443    2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:38:04 k8s-master-1 kubelet[2894775]: E0325 18:38:04.993617    2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:38:05 k8s-master-1 kubelet[2894775]: W0325 18:38:05.495773    2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:38:05 k8s-master-1 kubelet[2894775]: E0325 18:38:05.495961    2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: E0325 18:38:06.236195    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: E0325 18:38:06.329557    2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]"
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: E0325 18:38:06.330730    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: E0325 18:38:06.331683    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: E0325 18:38:06.331765    2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:38:06 k8s-master-1 kubelet[2894775]: I0325 18:38:06.331845    2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.144073    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.144842    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.145657    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.146469    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.147235    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:07 k8s-master-1 kubelet[2894775]: E0325 18:38:07.147291    2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:38:09 k8s-master-1 kubelet[2894775]: I0325 18:38:09.244357    2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:09 k8s-master-1 kubelet[2894775]: I0325 18:38:09.245417    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:09 k8s-master-1 kubelet[2894775]: I0325 18:38:09.246588    2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:09 k8s-master-1 kubelet[2894775]: I0325 18:38:09.247708    2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:09 k8s-master-1 kubelet[2894775]: I0325 18:38:09.248961    2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:10 k8s-master-1 kubelet[2894775]: I0325 18:38:10.266461    2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:38:10 k8s-master-1 kubelet[2894775]: E0325 18:38:10.268044    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:38:11 k8s-master-1 kubelet[2894775]: E0325 18:38:11.238901    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:38:11 k8s-master-1 kubelet[2894775]: E0325 18:38:11.996470    2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:38:13 k8s-master-1 kubelet[2894775]: I0325 18:38:13.244843    2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:38:13 k8s-master-1 kubelet[2894775]: E0325 18:38:13.247126    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.241341    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: I0325 18:38:16.245575    2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.247526    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.375121    2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]"
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.379026    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.379231    2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: E0325 18:38:16.379350    2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:38:16 k8s-master-1 kubelet[2894775]: I0325 18:38:16.379451    2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.430873    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.432433    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.434113    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.435764    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.437478    2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:17 k8s-master-1 kubelet[2894775]: E0325 18:38:17.437615    2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:38:18 k8s-master-1 kubelet[2894775]: E0325 18:38:18.999128    2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:38:19 k8s-master-1 kubelet[2894775]: I0325 18:38:19.245563    2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:19 k8s-master-1 kubelet[2894775]: I0325 18:38:19.247555    2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:19 k8s-master-1 kubelet[2894775]: I0325 18:38:19.249486    2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:19 k8s-master-1 kubelet[2894775]: I0325 18:38:19.251180    2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:19 k8s-master-1 kubelet[2894775]: I0325 18:38:19.253007    2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: W0325 18:38:20.154428    2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:38:20 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:38:21 k8s-master-1 kubelet[2894775]: I0325 18:38:21.245074    2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9"
Mar 25 18:38:21 k8s-master-1 kubelet[2894775]: E0325 18:38:21.245068    2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:38:21 k8s-master-1 kubelet[2894775]: E0325 18:38:21.250209    2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:38:25 k8s-master-1 kubelet[2894775]: E0325 18:38:25.590513 2894775 projected.go:199] Error preparing data for projected volume kube-api-access-twh7b for pod kube-system/kube-proxy-fgm87: failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:25 k8s-master-1 kubelet[2894775]: E0325 18:38:25.590869 2894775 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b podName:95ab2fd8-d229-475f-b404-59e5ad925195 nodeName:}" failed. No retries permitted until 2022-03-25 18:40:27.590759661 -0500 CDT m=+164793.980448734 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-twh7b" (UniqueName: "kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b") pod "kube-proxy-fgm87" (UID: "95ab2fd8-d229-475f-b404-59e5ad925195") : failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.001919 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.247414 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.426535 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.429036 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: W0325 18:38:26.430798 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.431049 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: E0325 18:38:26.431188 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:38:26 k8s-master-1 kubelet[2894775]: I0325 18:38:26.431284 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
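The cAdvisor "Failed to update stats ... hugetlb" warning above, repeated throughout this log for every cgroup it tracks, comes from runc's cgroup-v2 hugetlb statting: this arm64 kernel appears to have been built without hugepage support, so /sys/kernel/mm/hugepages does not exist. That failure is what leaves the "RecentStats: unable to find data in memory cache" gaps and the eviction manager's missing allocatableMemory.available signal. A sketch for confirming on the node:

    ls /sys/kernel/mm/hugepages    # "No such file or directory" on a kernel without hugetlb
    grep -i huge /proc/meminfo     # no HugePages_* lines when support is compiled out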
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: I0325 18:38:27.244732 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288"
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.246947 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
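etcd, kube-apiserver, kube-scheduler, and kube-controller-manager are all in a 5m0s restart back-off in this section; since the apiserver cannot come up without etcd, etcd's container is the natural first place to look. A sketch of pulling its last output, assuming crictl is available:

    crictl ps -a --name etcd           # find the most recent (exited) etcd container
    crictl logs --tail 50 <id>         # last lines before the crash
    journalctl -u containerd -S -1h    # runtime-side view of the restarts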
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.500314 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.501988 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.503629 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.505249 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.506712 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:27 k8s-master-1 kubelet[2894775]: E0325 18:38:27.506826 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.245677 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.246195 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: E0325 18:38:29.247394 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.247968 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.249798 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.251323 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:29 k8s-master-1 kubelet[2894775]: I0325 18:38:29.253265 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:31 k8s-master-1 kubelet[2894775]: E0325 18:38:31.249502 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: W0325 18:38:32.049466 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: I0325 18:38:32.245105 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: E0325 18:38:32.248731 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: W0325 18:38:32.776232 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:32 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:33 k8s-master-1 kubelet[2894775]: E0325 18:38:33.004989 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: W0325 18:38:36.076327 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: E0325 18:38:36.251999 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: E0325 18:38:36.475452 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: E0325 18:38:36.477795 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: E0325 18:38:36.479394 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: E0325 18:38:36.479532 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:38:36 k8s-master-1 kubelet[2894775]: I0325 18:38:36.479613 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.537551 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.538457 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.539275 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.540222 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.541069 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: E0325 18:38:37.541124 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:38:37 k8s-master-1 kubelet[2894775]: I0325 18:38:37.729100 2894775 scope.go:110] "RemoveContainer" containerID="629f0105cb2a3632178490656ec0f6b6b5b9c8db4dd35dbe66a5f6b6fb4d429b" | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: I0325 18:38:38.735695 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="c47f3c2c83f2c3bd40ed69ee4f22b6f89b715be3c91d6ddd120819eb73ac87bc" | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: I0325 18:38:38.739782 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: E0325 18:38:38.925488 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: W0325 18:38:38.978068 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:38 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.244574 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.245526 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.246609 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.247654 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.248581 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.743128 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: E0325 18:38:39.743917 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:38:39 k8s-master-1 kubelet[2894775]: I0325 18:38:39.743943 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:40 k8s-master-1 kubelet[2894775]: E0325 18:38:40.006174 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:40 k8s-master-1 kubelet[2894775]: I0325 18:38:40.745875 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:38:40 k8s-master-1 kubelet[2894775]: E0325 18:38:40.746503 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:38:41 k8s-master-1 kubelet[2894775]: E0325 18:38:41.254485 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:41 k8s-master-1 kubelet[2894775]: I0325 18:38:41.289844 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:38:41 k8s-master-1 kubelet[2894775]: E0325 18:38:41.290888 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:38:42 k8s-master-1 kubelet[2894775]: I0325 18:38:42.244196 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:38:42 k8s-master-1 kubelet[2894775]: E0325 18:38:42.244987 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:38:42 k8s-master-1 kubelet[2894775]: W0325 18:38:42.276328 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:42 k8s-master-1 kubelet[2894775]: E0325 18:38:42.276466 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: W0325 18:38:43.898443 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:43 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:44 k8s-master-1 kubelet[2894775]: I0325 18:38:44.245251 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:38:44 k8s-master-1 kubelet[2894775]: E0325 18:38:44.248882 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: W0325 18:38:46.028880 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: E0325 18:38:46.256738 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: E0325 18:38:46.521990 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: E0325 18:38:46.525072 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: E0325 18:38:46.527369 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: E0325 18:38:46.528124 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:38:46 k8s-master-1 kubelet[2894775]: I0325 18:38:46.528226 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.008662 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: W0325 18:38:47.136306 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.729472 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.731208 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.732915 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.734501 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.736341 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:47 k8s-master-1 kubelet[2894775]: E0325 18:38:47.736458 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: W0325 18:38:49.133680 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: I0325 18:38:49.244624 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: I0325 18:38:49.246480 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: I0325 18:38:49.248161 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: I0325 18:38:49.249689 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: I0325 18:38:49.251355 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: W0325 18:38:49.442935 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:51 k8s-master-1 kubelet[2894775]: E0325 18:38:51.259489 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
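
"Container runtime network not ready ... cni plugin not initialized" repeats on every runtime status sync because the container runtime has found no CNI network configuration to load. A small sketch of the usual first check, assuming the conventional config directory /etc/cni/net.d (the path is an assumption, not something this log states):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Conventional CNI config location for containerd/kubelet setups; an
	// assumption for illustration, not taken from this log.
	const confDir = "/etc/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	if len(entries) == 0 {
		fmt.Println("CNI config dir is empty; NetworkReady will stay false")
		return
	}
	for _, e := range entries {
		fmt.Println("CNI config present:", e.Name())
	}
}
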
Mar 25 18:38:51 k8s-master-1 kubelet[2894775]: W0325 18:38:51.737826 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:51 k8s-master-1 kubelet[2894775]: E0325 18:38:51.738103 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: E0325 18:38:54.010838 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: I0325 18:38:54.245615 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: E0325 18:38:54.248829 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: I0325 18:38:54.302057 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: E0325 18:38:54.304161 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
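
etcd, kube-proxy, kube-scheduler and kube-apiserver are all being restarted at "back-off 5m0s", i.e. each container has crashed often enough to reach the ceiling of the kubelet's exponential restart back-off. A toy illustration of that schedule, assuming the commonly cited defaults of a 10s initial delay doubling per crash up to the 5-minute cap (values assumed, not parsed from this log):

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second        // assumed initial back-off
	const maxDelay = 5 * time.Minute // the "5m0s" cap seen in the messages
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: back-off %v\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
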
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: W0325 18:38:54.902138 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:54 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:55 k8s-master-1 kubelet[2894775]: I0325 18:38:55.304880 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:38:55 k8s-master-1 kubelet[2894775]: E0325 18:38:55.306116 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: E0325 18:38:56.262310 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: E0325 18:38:56.572124 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: E0325 18:38:56.574071 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: E0325 18:38:56.576541 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: E0325 18:38:56.576728 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:38:56 k8s-master-1 kubelet[2894775]: I0325 18:38:56.576835 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
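
The "Partial failure issuing cadvisor.ContainerInfoV2" and eviction-manager lines are downstream of the hugetlb failure: since every cAdvisor stats update aborts, no recent samples land in its in-memory cache, the summary provider then cannot report /kubepods.slice or /system.slice/kubelet.service, and the eviction manager is left without its allocatableMemory.available signal. A toy model of that chain (types and names here are hypothetical, purely to show the dependency):

package main

import (
	"fmt"
	"time"
)

// statsCache is a hypothetical stand-in for cAdvisor's in-memory stats cache.
type statsCache struct {
	lastSample map[string]time.Time
}

// recent fails the same way the log does when no fresh sample exists for a cgroup.
func (c *statsCache) recent(container string, maxAge time.Duration) error {
	t, ok := c.lastSample[container]
	if !ok || time.Since(t) > maxAge {
		return fmt.Errorf("%q: RecentStats: unable to find data in memory cache", container)
	}
	return nil
}

func main() {
	cache := &statsCache{lastSample: map[string]time.Time{}}
	// Because the hugetlb error aborts every updateStats call, the cache
	// never fills, so every consumer read fails.
	if err := cache.recent("/kubepods.slice", 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}
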
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: W0325 18:38:57.958955 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:57 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.135279 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.137049 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.138561 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.140078 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.141569 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: E0325 18:38:58.141686 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
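
Five consecutive "Error updating node status, will retry" lines followed by "update node status exceeds retry count" show a bounded retry loop: the kubelet tries the update a fixed number of times per sync, then gives up until the next tick instead of blocking. A schematic of that pattern (the retry count of 5 is inferred from the five attempts above; the helper is hypothetical):

package main

import (
	"errors"
	"fmt"
)

// tryUpdateNodeStatus stands in for the kubelet's node status call; with
// nothing listening on 192.168.1.194:6443 it always fails.
func tryUpdateNodeStatus() error {
	return errors.New(`error getting node "k8s-master-1": connect: connection refused`)
}

func main() {
	const retries = 5 // matches the five "will retry" entries in the log
	for i := 0; i < retries; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Println("Error updating node status, will retry:", err)
			continue
		}
		return
	}
	fmt.Println("Unable to update node status: update node status exceeds retry count")
}
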
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: W0325 18:38:58.394592 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:38:58 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: W0325 18:38:59.217321 2894775 sysinfo.go:203] Nodes topology is not available, providing CPU topology | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: W0325 18:38:59.226006 2894775 machine.go:65] Cannot read vendor id correctly, set empty. | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.244989 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.246080 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: E0325 18:38:59.248005 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.249181 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.250826 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.252426 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:38:59 k8s-master-1 kubelet[2894775]: I0325 18:38:59.254206 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:01 k8s-master-1 kubelet[2894775]: E0325 18:39:01.013026 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:01 k8s-master-1 kubelet[2894775]: E0325 18:39:01.265332 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: E0325 18:39:06.267524 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: E0325 18:39:06.628555 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: E0325 18:39:06.636171 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: E0325 18:39:06.636412 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: E0325 18:39:06.636557 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:39:06 k8s-master-1 kubelet[2894775]: I0325 18:39:06.640529 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:39:07 k8s-master-1 kubelet[2894775]: I0325 18:39:07.245531 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:39:07 k8s-master-1 kubelet[2894775]: E0325 18:39:07.247394 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:39:07 k8s-master-1 kubelet[2894775]: E0325 18:39:07.249972 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.015978 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.339237 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.341032 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.342642 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.344722 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.346370 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.346493 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:39:08 k8s-master-1 kubelet[2894775]: E0325 18:39:08.936920 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.245580 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.246711 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: E0325 18:39:09.248161 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.249226 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.252650 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.254697 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.257618 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: I0325 18:39:09.309340 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: E0325 18:39:09.310394 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: W0325 18:39:09.514390 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:09 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:11 k8s-master-1 kubelet[2894775]: E0325 18:39:11.269801 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:12 k8s-master-1 kubelet[2894775]: I0325 18:39:12.285961 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:39:12 k8s-master-1 kubelet[2894775]: E0325 18:39:12.289396 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:39:15 k8s-master-1 kubelet[2894775]: E0325 18:39:15.017857 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: E0325 18:39:16.272331 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: E0325 18:39:16.687332 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: E0325 18:39:16.691109 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: E0325 18:39:16.691312 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: E0325 18:39:16.691444 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:39:16 k8s-master-1 kubelet[2894775]: I0325 18:39:16.691548 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: I0325 18:39:18.245322 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.247084 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.720936 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.722822 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.724526 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.726434 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.728213 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.728349 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:39:18 k8s-master-1 kubelet[2894775]: E0325 18:39:18.940247 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:39:19 k8s-master-1 kubelet[2894775]: I0325 18:39:19.245691 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:19 k8s-master-1 kubelet[2894775]: I0325 18:39:19.247349 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:19 k8s-master-1 kubelet[2894775]: I0325 18:39:19.249139 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:19 k8s-master-1 kubelet[2894775]: I0325 18:39:19.250932 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:19 k8s-master-1 kubelet[2894775]: I0325 18:39:19.252728 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:20 k8s-master-1 kubelet[2894775]: I0325 18:39:20.245185 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:39:20 k8s-master-1 kubelet[2894775]: E0325 18:39:20.247493 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:21 k8s-master-1 kubelet[2894775]: E0325 18:39:21.275332 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:22 k8s-master-1 kubelet[2894775]: E0325 18:39:22.021536 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:24 k8s-master-1 kubelet[2894775]: I0325 18:39:24.307741 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:39:24 k8s-master-1 kubelet[2894775]: E0325 18:39:24.309123 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.277410 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: I0325 18:39:26.304727 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.309751 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.731800 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.733507 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: W0325 18:39:26.735189 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.735561 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: E0325 18:39:26.735687 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:39:26 k8s-master-1 kubelet[2894775]: I0325 18:39:26.735785 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:39:27 k8s-master-1 kubelet[2894775]: W0325 18:39:27.567533 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:27 k8s-master-1 kubelet[2894775]: E0325 18:39:27.567897 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.943080 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.978397 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.980206 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.981838 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.983707 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.985308 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:28 k8s-master-1 kubelet[2894775]: E0325 18:39:28.985430 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: E0325 18:39:29.023714 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: I0325 18:39:29.245381 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: I0325 18:39:29.247473 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: I0325 18:39:29.249678 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: I0325 18:39:29.251582 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:29 k8s-master-1 kubelet[2894775]: I0325 18:39:29.253576 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:30 k8s-master-1 kubelet[2894775]: I0325 18:39:30.244879 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:39:30 k8s-master-1 kubelet[2894775]: E0325 18:39:30.246635 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:39:31 k8s-master-1 kubelet[2894775]: E0325 18:39:31.279712 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:31 k8s-master-1 kubelet[2894775]: W0325 18:39:31.594466 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:31 k8s-master-1 kubelet[2894775]: E0325 18:39:31.594739 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: I0325 18:39:33.245245 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: W0325 18:39:33.898879 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:33 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:34 k8s-master-1 kubelet[2894775]: I0325 18:39:34.004170 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: I0325 18:39:35.288084 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: E0325 18:39:35.288784 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: W0325 18:39:35.385995 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: W0325 18:39:35.970300 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:35 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.025594 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.281688 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.759475 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.762222 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.762328 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: E0325 18:39:36.762390 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:39:36 k8s-master-1 kubelet[2894775]: I0325 18:39:36.762440 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:39:38 k8s-master-1 kubelet[2894775]: I0325 18:39:38.028421 2894775 scope.go:110] "RemoveContainer" containerID="aff8aff9c3dbba9ee5d22637fd27f40e384a176ed64395343ed634b848513288" | |
Mar 25 18:39:38 k8s-master-1 kubelet[2894775]: E0325 18:39:38.946388 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.038875 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="612cbadd81003c9874b61d4a3e5fe5726d0c4ed5dc9280c213bdec0e4bdbda18" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.041972 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.221466 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.222593 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.223771 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.224854 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.227877 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.227965 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.245047 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.246321 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.247555 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.248923 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: I0325 18:39:39.250243 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: W0325 18:39:39.343964 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:39 k8s-master-1 kubelet[2894775]: E0325 18:39:39.345788 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:40 k8s-master-1 kubelet[2894775]: I0325 18:39:40.048177 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:39:40 k8s-master-1 kubelet[2894775]: E0325 18:39:40.049210 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:40 k8s-master-1 kubelet[2894775]: I0325 18:39:40.050458 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: I0325 18:39:41.050539 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: E0325 18:39:41.051573 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: I0325 18:39:41.244687 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: I0325 18:39:41.245345 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: E0325 18:39:41.246220 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:39:41 k8s-master-1 kubelet[2894775]: E0325 18:39:41.283651 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: W0325 18:39:42.771599 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:42 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:43 k8s-master-1 kubelet[2894775]: E0325 18:39:43.026798 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: W0325 18:39:44.500385 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: I0325 18:39:44.815565 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:39:44 k8s-master-1 kubelet[2894775]: E0325 18:39:44.817898 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: E0325 18:39:46.286592 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: E0325 18:39:46.804568 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: E0325 18:39:46.810000 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: E0325 18:39:46.811064 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: E0325 18:39:46.811759 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:39:46 k8s-master-1 kubelet[2894775]: I0325 18:39:46.812453 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: W0325 18:39:47.309599 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: W0325 18:39:47.541838 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: W0325 18:39:47.607209 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
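[Note] Every one of the repeated traces above bottoms out in runc's fs2.statHugeTlb failing to open /sys/kernel/mm/hugepages. That sysfs directory is how hugepage sizes are discovered, and it is absent on kernels built without hugetlb support (common on arm64 single-board images; this node is arm64 per asm_arm64.s). A minimal Go sketch of the failure mode, assuming only what the log text itself shows (this is not runc's actual code):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hugepage sizes are discovered by listing this sysfs directory;
	// if the kernel lacks hugetlb support the directory does not exist
	// and the open fails with exactly the error seen in the traces.
	entries, err := os.ReadDir("/sys/kernel/mm/hugepages")
	if err != nil {
		fmt.Println("hugetlb info unavailable:", err) // "no such file or directory"
		return
	}
	for _, e := range entries {
		// Directory names encode the page size, e.g. "hugepages-2048kB".
		fmt.Println("supported hugepage size:", e.Name())
	}
}
```

Run on the affected node, this would print the same "no such file or directory" error, confirming the warnings are a kernel-capability gap rather than a cgroup misconfiguration; the kubelet explicitly continues ("continuing to push stats"), so these warnings are noisy but non-fatal.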
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: I0325 18:39:47.687588 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:39:47 k8s-master-1 kubelet[2894775]: E0325 18:39:47.688825 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: I0325 18:39:50.288405 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: E0325 18:39:50.289140 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: W0325 18:39:50.861120 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:50 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:51 k8s-master-1 kubelet[2894775]: E0325 18:39:51.288328 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
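[Note] The recurring "cni plugin not initialized" error above is the container runtime reporting that it has not yet loaded any CNI network configuration. A hedged sketch of one way to check for that on the node; /etc/cni/net.d is assumed here as the conventional default config directory (containerd can be configured to use a different path):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// The CRI runtime stays in "NetworkPluginNotReady" until a network
	// config (.conf/.conflist) appears in its CNI config directory.
	matches, err := filepath.Glob("/etc/cni/net.d/*.conf*")
	if err != nil || len(matches) == 0 {
		fmt.Println("no CNI network config found:", err)
		os.Exit(1)
	}
	for _, m := range matches {
		fmt.Println("CNI config present:", m)
	}
}
```

An empty result would be consistent with this log: no network plugin has been installed (or its pod cannot start while the control plane is down), so the runtime keeps reporting NetworkReady=false.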
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: W0325 18:39:52.674618 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:52 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: E0325 18:39:56.294078 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: I0325 18:39:56.297523 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2"
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: W0325 18:39:56.437501 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: W0325 18:39:56.823793 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: E0325 18:39:56.850203 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: E0325 18:39:56.852169 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: E0325 18:39:56.853264 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: E0325 18:39:56.853384 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:39:56 k8s-master-1 kubelet[2894775]: I0325 18:39:56.853436 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: I0325 18:39:58.082308 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": net/http: TLS handshake timeout"
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: E0325 18:39:58.949727 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": net/http: TLS handshake timeout'(may retry after sleeping)
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: W0325 18:39:58.989423 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:39:58 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:39:59 k8s-master-1 kubelet[2894775]: E0325 18:39:59.453707 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 25 18:40:00 k8s-master-1 kubelet[2894775]: E0325 18:40:00.028800 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": context deadline exceeded
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: I0325 18:40:01.244995 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: E0325 18:40:01.246351 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: I0325 18:40:01.291878 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: E0325 18:40:01.292594 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: E0325 18:40:01.295592 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: W0325 18:40:01.676033 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:01 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: E0325 18:40:06.297699 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: E0325 18:40:06.910119 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: E0325 18:40:06.915067 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: E0325 18:40:06.917235 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: E0325 18:40:06.917409 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:40:06 k8s-master-1 kubelet[2894775]: I0325 18:40:06.917527 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:40:08 k8s-master-1 kubelet[2894775]: I0325 18:40:08.084093 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": net/http: TLS handshake timeout"
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: E0325 18:40:09.456823 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: E0325 18:40:09.459927 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": read tcp 192.168.1.194:58926->192.168.1.194:6443: use of closed network connection'(may retry after sleeping)
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: W0325 18:40:09.496212 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: W0325 18:40:09.739262 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:09 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: E0325 18:40:10.456655 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.457156 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.459455 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.460447 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.461366 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.462324 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.463246 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.657608 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: I0325 18:40:10.857202 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: E0325 18:40:10.986208 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: E0325 18:40:10.987435 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: E0325 18:40:10.988544 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:10 k8s-master-1 kubelet[2894775]: E0325 18:40:10.988611 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
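[Note] Over this stretch the failure mode against https://192.168.1.194:6443 shifts from "TLS handshake timeout" (port open, apiserver too slow to complete the handshake) to "connect: connection refused" (nothing listening at all, consistent with kube-apiserver itself being in CrashLoopBackOff). A hedged Go sketch that separates the two cases from the node; the address is taken from the log, and InsecureSkipVerify is deliberate because only reachability is being probed, not identity:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "192.168.1.194:6443" // apiserver endpoint from the log

	// Step 1: plain TCP. "connection refused" means no listener.
	conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
	if err != nil {
		fmt.Println("TCP dial failed (apiserver not listening):", err)
		return
	}
	defer conn.Close()

	// Step 2: TLS handshake. A timeout here with TCP succeeding
	// matches the "TLS handshake timeout" entries above.
	tconn := tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
	tconn.SetDeadline(time.Now().Add(3 * time.Second))
	if err := tconn.Handshake(); err != nil {
		fmt.Println("TCP ok but TLS handshake failed/slow:", err)
		return
	}
	fmt.Println("TLS handshake ok; apiserver port is serving")
}
```

With the apiserver container down, the probe would report the refused dial, matching the burst of "connection refused" status and lease failures that follows.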
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: I0325 18:40:11.057628 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: I0325 18:40:11.183621 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="7a1d50eae6bb8863f2be7716036072b8bac2a39e8cb26bfcc798be83aecdf420" | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: I0325 18:40:11.183741 2894775 scope.go:110] "RemoveContainer" containerID="0d508260ec0c26385a58811ae83cb93d32a28fa943d9c4fb1e1e4423f458d4e9" | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: I0325 18:40:11.257788 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: E0325 18:40:11.299771 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: E0325 18:40:11.522141 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: W0325 18:40:11.534101 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: W0325 18:40:11.566129 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:40:11 k8s-master-1 kubelet[2894775]: E0325 18:40:11.566271 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: I0325 18:40:12.191636 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: I0325 18:40:12.193020 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: E0325 18:40:12.193203 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: I0325 18:40:12.198184 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="35ffc0e4f6d6cf56c2a31b28ff0ff0ce39bc5e94ca910734533519b56a7dbef4" | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: I0325 18:40:12.198259 2894775 scope.go:110] "RemoveContainer" containerID="d544a266b6aa43d9e09a3fc9c0282d59300fd915d2f388f0977f6e6133567da2" | |
Mar 25 18:40:12 k8s-master-1 kubelet[2894775]: I0325 18:40:12.200255 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: I0325 18:40:13.208652 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: E0325 18:40:13.210195 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: E0325 18:40:13.373116 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: W0325 18:40:13.398047 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:13 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:14 k8s-master-1 kubelet[2894775]: I0325 18:40:14.215281 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:14 k8s-master-1 kubelet[2894775]: I0325 18:40:14.215743 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:14 k8s-master-1 kubelet[2894775]: E0325 18:40:14.216750 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:14 k8s-master-1 kubelet[2894775]: I0325 18:40:14.292309 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:40:14 k8s-master-1 kubelet[2894775]: E0325 18:40:14.293015 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:40:15 k8s-master-1 kubelet[2894775]: I0325 18:40:15.034158 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:40:15 k8s-master-1 kubelet[2894775]: E0325 18:40:15.036130 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:40:15 k8s-master-1 kubelet[2894775]: I0325 18:40:15.220702 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:15 k8s-master-1 kubelet[2894775]: E0325 18:40:15.222485 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: I0325 18:40:16.226005 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.227705 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: I0325 18:40:16.245537 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.247718 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.302308 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.963583 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.966678 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.968809 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: E0325 18:40:16.968981 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:40:16 k8s-master-1 kubelet[2894775]: I0325 18:40:16.969096 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:40:17 k8s-master-1 kubelet[2894775]: E0325 18:40:17.459785 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: I0325 18:40:19.244924 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: I0325 18:40:19.247454 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: I0325 18:40:19.250070 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: I0325 18:40:19.251997 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: I0325 18:40:19.254017 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:19 k8s-master-1 kubelet[2894775]: E0325 18:40:19.462433 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:40:20 k8s-master-1 kubelet[2894775]: I0325 18:40:20.141022 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:20 k8s-master-1 kubelet[2894775]: W0325 18:40:20.141185 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:20 k8s-master-1 kubelet[2894775]: E0325 18:40:20.141463 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:20 k8s-master-1 kubelet[2894775]: E0325 18:40:20.143063 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.062244 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.064185 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.065952 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.067692 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.069323 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.069445 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:40:21 k8s-master-1 kubelet[2894775]: E0325 18:40:21.304394 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:24 k8s-master-1 kubelet[2894775]: E0325 18:40:24.462776 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:26 k8s-master-1 kubelet[2894775]: I0325 18:40:26.299388 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:40:26 k8s-master-1 kubelet[2894775]: E0325 18:40:26.303423 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:40:26 k8s-master-1 kubelet[2894775]: E0325 18:40:26.306914 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.006684 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: W0325 18:40:27.009941 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.010223 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.010392 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.010513 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: I0325 18:40:27.010601 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.669353 2894775 projected.go:199] Error preparing data for projected volume kube-api-access-twh7b for pod kube-system/kube-proxy-fgm87: failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:27 k8s-master-1 kubelet[2894775]: E0325 18:40:27.669697 2894775 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b podName:95ab2fd8-d229-475f-b404-59e5ad925195 nodeName:}" failed. No retries permitted until 2022-03-25 18:42:29.669590706 -0500 CDT m=+164916.059279835 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-twh7b" (UniqueName: "kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b") pod "kube-proxy-fgm87" (UID: "95ab2fd8-d229-475f-b404-59e5ad925195") : failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:28 k8s-master-1 kubelet[2894775]: I0325 18:40:28.305833 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:40:28 k8s-master-1 kubelet[2894775]: E0325 18:40:28.309109 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: I0325 18:40:29.245405 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: I0325 18:40:29.247182 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: I0325 18:40:29.248932 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: I0325 18:40:29.250614 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: I0325 18:40:29.252404 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:29 k8s-master-1 kubelet[2894775]: E0325 18:40:29.464523 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.127358 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.129063 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.130691 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.132803 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.134677 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.134789 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: I0325 18:40:31.246438 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.248936 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.309028 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:31 k8s-master-1 kubelet[2894775]: E0325 18:40:31.465627 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:32 k8s-master-1 kubelet[2894775]: I0325 18:40:32.244583 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:32 k8s-master-1 kubelet[2894775]: E0325 18:40:32.246287 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:36 k8s-master-1 kubelet[2894775]: E0325 18:40:36.311962 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:37 k8s-master-1 kubelet[2894775]: E0325 18:40:37.050892 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:37 k8s-master-1 kubelet[2894775]: E0325 18:40:37.054266 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:40:37 k8s-master-1 kubelet[2894775]: E0325 18:40:37.054456 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:40:37 k8s-master-1 kubelet[2894775]: E0325 18:40:37.054566 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:40:37 k8s-master-1 kubelet[2894775]: I0325 18:40:37.054661 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:40:38 k8s-master-1 kubelet[2894775]: E0325 18:40:38.468587 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: I0325 18:40:39.245565 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: I0325 18:40:39.247624 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: I0325 18:40:39.249590 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: I0325 18:40:39.251698 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: I0325 18:40:39.253318 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:39 k8s-master-1 kubelet[2894775]: E0325 18:40:39.467395 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:40:40 k8s-master-1 kubelet[2894775]: I0325 18:40:40.297637 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:40:40 k8s-master-1 kubelet[2894775]: E0325 18:40:40.299232 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.313674 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.422116 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.423406 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.424936 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.426406 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.427865 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: E0325 18:40:41.427975 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: W0325 18:40:41.820215 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:41 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: W0325 18:40:42.634164 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:42 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:43 k8s-master-1 kubelet[2894775]: I0325 18:40:43.328653 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:43 k8s-master-1 kubelet[2894775]: E0325 18:40:43.330002 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:43 k8s-master-1 kubelet[2894775]: I0325 18:40:43.337643 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:40:43 k8s-master-1 kubelet[2894775]: E0325 18:40:43.339990 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:40:44 k8s-master-1 kubelet[2894775]: I0325 18:40:44.245069 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:40:44 k8s-master-1 kubelet[2894775]: E0325 18:40:44.250410 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:40:45 k8s-master-1 kubelet[2894775]: E0325 18:40:45.471495 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:46 k8s-master-1 kubelet[2894775]: E0325 18:40:46.315672 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:47 k8s-master-1 kubelet[2894775]: E0325 18:40:47.102244 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:47 k8s-master-1 kubelet[2894775]: E0325 18:40:47.107669 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:40:47 k8s-master-1 kubelet[2894775]: E0325 18:40:47.107923 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:40:47 k8s-master-1 kubelet[2894775]: E0325 18:40:47.108057 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:40:47 k8s-master-1 kubelet[2894775]: I0325 18:40:47.108171 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
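The five entries above are one failure chain, not five: cadvisor answers ContainerInfoV2 from an in-memory cache that only a successful housekeeping pass fills, each pass aborts on the hugetlb error traced below, so every per-cgroup lookup reports "RecentStats: unable to find data in memory cache"; the summary provider then cannot build stats for /kubepods.slice or /system.slice/kubelet.service, and the eviction manager is left with no allocatableMemory.available observation. A minimal Go sketch of that cache behavior, with hypothetical names (this is not cadvisor's actual implementation):

    package main

    import (
        "errors"
        "fmt"
    )

    // sample stands in for one housekeeping snapshot of a cgroup.
    type sample struct{ cpuNanos, memBytes uint64 }

    // recentStatsCache mimics the lookup that fails above: it can only
    // return what a successful housekeeping pass previously stored.
    type recentStatsCache struct {
        data map[string][]sample // cgroup path -> stored samples
    }

    func (c *recentStatsCache) RecentStats(container string) ([]sample, error) {
        if s := c.data[container]; len(s) > 0 {
            return s, nil
        }
        return nil, errors.New("unable to find data in memory cache")
    }

    func main() {
        cache := &recentStatsCache{data: map[string][]sample{}}
        // Housekeeping never committed a sample, so the lookup fails
        // exactly the way the log lines above report.
        if _, err := cache.RecentStats("/kubepods.slice"); err != nil {
            fmt.Printf("%q: RecentStats: %v\n", "/kubepods.slice", err)
        }
    }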
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: W0325 18:40:48.832182 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:48 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
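This trace repeats below for every cgroup on the node: runc's fs2.statHugeTlb enumerates supported hugepage sizes from /sys/kernel/mm/hugepages, and on a kernel built without hugetlb page support (common on Raspberry Pi class arm64 builds) that sysfs directory simply does not exist. A short sketch of the failing probe, assuming only that the directory listing is what errors:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // The hugepage-size enumeration that the trace above fails on:
        // if the kernel lacks hugetlb support, this directory is absent
        // and the listing returns "no such file or directory".
        entries, err := os.ReadDir("/sys/kernel/mm/hugepages")
        if err != nil {
            fmt.Println("hugetlb probe failed:", err)
            return
        }
        for _, e := range entries {
            fmt.Println("supported hugepage size:", e.Name())
        }
    }

On a kernel with hugetlb enabled the same listing prints entries such as hugepages-2048kB and the housekeeping pass proceeds.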
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: W0325 18:40:49.157742 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: W0325 18:40:49.221362 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: I0325 18:40:49.245310 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: I0325 18:40:49.247102 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: I0325 18:40:49.248922 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: I0325 18:40:49.250663 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: I0325 18:40:49.252342 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: E0325 18:40:49.470730 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: W0325 18:40:49.870202 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: W0325 18:40:50.182538 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:50 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.318037 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.627728 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.629399 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.630931 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.632687 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.634282 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:51 k8s-master-1 kubelet[2894775]: E0325 18:40:51.634391 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
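All five retries above fail at the TCP layer: "connect: connection refused" means nothing is listening on 192.168.1.194:6443, because kube-apiserver itself is crash-looping on this node, so the kubelet cannot update node status, renew its lease, or flush events. The failing call reduces to an ordinary dial, which can be reproduced from the node with a short illustrative Go check (address taken from the log):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // With the apiserver pod down, no socket is bound on 6443 and the
        // kernel answers the SYN with RST, i.e. ECONNREFUSED, immediately.
        conn, err := net.DialTimeout("tcp", "192.168.1.194:6443", 2*time.Second)
        if err != nil {
            fmt.Println("apiserver unreachable:", err)
            return
        }
        conn.Close()
        fmt.Println("apiserver port is accepting connections")
    }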
Mar 25 18:40:52 k8s-master-1 kubelet[2894775]: I0325 18:40:52.299320 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:40:52 k8s-master-1 kubelet[2894775]: E0325 18:40:52.300713 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:40:52 k8s-master-1 kubelet[2894775]: E0325 18:40:52.473693 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:54 k8s-master-1 kubelet[2894775]: I0325 18:40:54.295138 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:40:54 k8s-master-1 kubelet[2894775]: E0325 18:40:54.298145 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:40:55 k8s-master-1 kubelet[2894775]: I0325 18:40:55.245715 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:40:55 k8s-master-1 kubelet[2894775]: E0325 18:40:55.248118 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
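"back-off 5m0s" in these entries marks the kubelet's container restart back-off at its ceiling: per the documented CrashLoopBackOff behavior the delay starts at 10s, doubles on each crash, and is capped at five minutes, after which every RemoveContainer/StartContainer cycle above waits the full cap. A sketch of that schedule (the loop itself is illustrative, not kubelet code):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const maxDelay = 5 * time.Minute // the "back-off 5m0s" cap in the log
        delay := 10 * time.Second        // documented initial back-off
        for restart := 1; restart <= 7; restart++ {
            fmt.Printf("restart %d: wait %s\n", restart, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }

By the sixth consecutive crash the delay has reached 5m0s and stays there, which is why every pod on this node reports the same value.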
Mar 25 18:40:55 k8s-master-1 kubelet[2894775]: W0325 18:40:55.715539 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:55 k8s-master-1 kubelet[2894775]: E0325 18:40:55.715901 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:56 k8s-master-1 kubelet[2894775]: E0325 18:40:56.320435 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.151751 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.153658 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.155870 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.156050 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: I0325 18:40:57.156157 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: I0325 18:40:57.245007 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.246124 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: W0325 18:40:57.380854 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: W0325 18:40:57.427109 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: E0325 18:40:57.427355 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: W0325 18:40:57.471570 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:40:57 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: I0325 18:40:59.245414 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: I0325 18:40:59.247428 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: I0325 18:40:59.249296 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: I0325 18:40:59.251040 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: I0325 18:40:59.252708 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: E0325 18:40:59.473214 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16df2e5647290167", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5775", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 36, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:7352, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-scheduler-k8s-master-1.16df2e5647290167": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: E0325 18:40:59.474773 2894775 event.go:221] Unable to write event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-scheduler-k8s-master-1.16dfc40ca626a374", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-scheduler-k8s-master-1", UID:"b3c13021b6a6369c1aec32ee08be09d3", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-scheduler}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 7, 247092596, time.Local), Count:1, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}' (retry limit exceeded!)
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: E0325 18:40:59.475797 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:40:59 k8s-master-1 kubelet[2894775]: E0325 18:40:59.477755 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.323409 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.987079 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.989056 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.990873 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.992973 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.994907 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:01 k8s-master-1 kubelet[2894775]: E0325 18:41:01.995021 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:41:05 k8s-master-1 kubelet[2894775]: E0325 18:41:05.899123 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: W0325 18:41:06.165223 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: E0325 18:41:06.325740 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:06 k8s-master-1 kubelet[2894775]: E0325 18:41:06.478910 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: E0325 18:41:07.197295 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: E0325 18:41:07.201596 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: E0325 18:41:07.203485 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: E0325 18:41:07.203637 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: I0325 18:41:07.203728 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: I0325 18:41:07.300242 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:41:07 k8s-master-1 kubelet[2894775]: E0325 18:41:07.302091 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:41:08 k8s-master-1 kubelet[2894775]: I0325 18:41:08.320630 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:41:08 k8s-master-1 kubelet[2894775]: E0325 18:41:08.326314 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.244955 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.245077 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.246813 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: E0325 18:41:09.247162 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.249307 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.251263 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:09 k8s-master-1 kubelet[2894775]: I0325 18:41:09.253086 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: W0325 18:41:11.088706 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: I0325 18:41:11.290664 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: E0325 18:41:11.292517 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:41:11 k8s-master-1 kubelet[2894775]: E0325 18:41:11.327597 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.186586 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.188140 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.189728 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.191389 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.193402 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:12 k8s-master-1 kubelet[2894775]: E0325 18:41:12.193525 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:41:13 k8s-master-1 kubelet[2894775]: E0325 18:41:13.481380 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:15 k8s-master-1 kubelet[2894775]: E0325 18:41:15.901966 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:16 k8s-master-1 kubelet[2894775]: E0325 18:41:16.330151 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:17 k8s-master-1 kubelet[2894775]: E0325 18:41:17.247296 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:41:17 k8s-master-1 kubelet[2894775]: E0325 18:41:17.251729 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:41:17 k8s-master-1 kubelet[2894775]: E0325 18:41:17.252045 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:41:17 k8s-master-1 kubelet[2894775]: E0325 18:41:17.252187 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:41:17 k8s-master-1 kubelet[2894775]: I0325 18:41:17.252297 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: W0325 18:41:18.182869 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:18 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.246082 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.248123 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.249866 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.251587 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.254316 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: I0325 18:41:19.313703 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: E0325 18:41:19.315788 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: W0325 18:41:19.950807 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:19 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:20 k8s-master-1 kubelet[2894775]: I0325 18:41:20.303168 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:41:20 k8s-master-1 kubelet[2894775]: E0325 18:41:20.304850 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:41:20 k8s-master-1 kubelet[2894775]: E0325 18:41:20.483741 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:21 k8s-master-1 kubelet[2894775]: I0325 18:41:21.245686 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:41:21 k8s-master-1 kubelet[2894775]: E0325 18:41:21.249130 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:41:21 k8s-master-1 kubelet[2894775]: E0325 18:41:21.332486 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.337827 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.341697 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.343383 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.345106 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.346859 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:22 k8s-master-1 kubelet[2894775]: E0325 18:41:22.346978 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:41:24 k8s-master-1 kubelet[2894775]: I0325 18:41:24.244892 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:41:24 k8s-master-1 kubelet[2894775]: E0325 18:41:24.246636 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:41:25 k8s-master-1 kubelet[2894775]: E0325 18:41:25.904830 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: E0325 18:41:26.334997 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: W0325 18:41:26.358288 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.297623 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: W0325 18:41:27.301927 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.302318 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.302511 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.302645 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: I0325 18:41:27.302755 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: W0325 18:41:27.455269 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.455573 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:27 k8s-master-1 kubelet[2894775]: E0325 18:41:27.485916 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:29 k8s-master-1 kubelet[2894775]: I0325 18:41:29.245460 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:29 k8s-master-1 kubelet[2894775]: I0325 18:41:29.247143 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:29 k8s-master-1 kubelet[2894775]: I0325 18:41:29.249479 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:29 k8s-master-1 kubelet[2894775]: I0325 18:41:29.251318 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:29 k8s-master-1 kubelet[2894775]: I0325 18:41:29.253091 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:31 k8s-master-1 kubelet[2894775]: I0325 18:41:31.303048 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:41:31 k8s-master-1 kubelet[2894775]: E0325 18:41:31.304847 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:41:31 k8s-master-1 kubelet[2894775]: E0325 18:41:31.336735 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.455650 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.457201 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.458873 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.460662 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.462231 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:32 k8s-master-1 kubelet[2894775]: E0325 18:41:32.462344 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:41:33 k8s-master-1 kubelet[2894775]: I0325 18:41:33.308947 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:41:33 k8s-master-1 kubelet[2894775]: E0325 18:41:33.311925 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:41:34 k8s-master-1 kubelet[2894775]: E0325 18:41:34.488859 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:35 k8s-master-1 kubelet[2894775]: I0325 18:41:35.244805 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:41:35 k8s-master-1 kubelet[2894775]: E0325 18:41:35.248417 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:41:35 k8s-master-1 kubelet[2894775]: E0325 18:41:35.907542 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:36 k8s-master-1 kubelet[2894775]: E0325 18:41:36.339213 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:37 k8s-master-1 kubelet[2894775]: E0325 18:41:37.331675 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:41:37 k8s-master-1 kubelet[2894775]: E0325 18:41:37.332915 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:41:37 k8s-master-1 kubelet[2894775]: E0325 18:41:37.334355 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:41:37 k8s-master-1 kubelet[2894775]: E0325 18:41:37.334473 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:41:37 k8s-master-1 kubelet[2894775]: I0325 18:41:37.334545 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:41:38 k8s-master-1 kubelet[2894775]: I0325 18:41:38.244806 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:41:38 k8s-master-1 kubelet[2894775]: E0325 18:41:38.246654 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:41:39 k8s-master-1 kubelet[2894775]: I0325 18:41:39.245629 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:39 k8s-master-1 kubelet[2894775]: I0325 18:41:39.250141 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:39 k8s-master-1 kubelet[2894775]: I0325 18:41:39.252225 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:39 k8s-master-1 kubelet[2894775]: I0325 18:41:39.254040 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:39 k8s-master-1 kubelet[2894775]: I0325 18:41:39.255887 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:41 k8s-master-1 kubelet[2894775]: E0325 18:41:41.341380 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:41 k8s-master-1 kubelet[2894775]: E0325 18:41:41.491483 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.723953 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.725448 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.726841 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.728634 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.730119 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: E0325 18:41:42.730229 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: W0325 18:41:42.829295 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:41:42 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:41:43 k8s-master-1 kubelet[2894775]: I0325 18:41:43.303990 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:41:43 k8s-master-1 kubelet[2894775]: E0325 18:41:43.306033 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:41:45 k8s-master-1 kubelet[2894775]: E0325 18:41:45.910286 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:41:46 k8s-master-1 kubelet[2894775]: E0325 18:41:46.344669 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: I0325 18:41:47.302224 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: E0325 18:41:47.305651 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: E0325 18:41:47.367531 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: E0325 18:41:47.369087 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: E0325 18:41:47.370701 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: E0325 18:41:47.370835 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:41:47 k8s-master-1 kubelet[2894775]: I0325 18:41:47.370921 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: E0325 18:41:48.493547 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: W0325 18:41:48.518209 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:48 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:49 k8s-master-1 kubelet[2894775]: I0325 18:41:49.244937 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:49 k8s-master-1 kubelet[2894775]: I0325 18:41:49.247364 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:49 k8s-master-1 kubelet[2894775]: I0325 18:41:49.249186 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:49 k8s-master-1 kubelet[2894775]: I0325 18:41:49.250968 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:49 k8s-master-1 kubelet[2894775]: I0325 18:41:49.252787 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:50 k8s-master-1 kubelet[2894775]: I0325 18:41:50.245404 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:41:50 k8s-master-1 kubelet[2894775]: E0325 18:41:50.248850 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: W0325 18:41:51.264344 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:51 k8s-master-1 kubelet[2894775]: E0325 18:41:51.346683 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: W0325 18:41:52.137514 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: I0325 18:41:52.244995 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e" | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: E0325 18:41:52.246526 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: W0325 18:41:52.254831 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: E0325 18:41:52.255081 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: W0325 18:41:52.903160 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:52 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.049132 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.050804 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.052531 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.054100 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.055649 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:53 k8s-master-1 kubelet[2894775]: E0325 18:41:53.055768 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:41:55 k8s-master-1 kubelet[2894775]: E0325 18:41:55.496481 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:41:55 k8s-master-1 kubelet[2894775]: E0325 18:41:55.914352 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: W0325 18:41:56.129196 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:56 k8s-master-1 kubelet[2894775]: E0325 18:41:56.348889 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: I0325 18:41:57.325296 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: E0325 18:41:57.326756 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: E0325 18:41:57.404153 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: E0325 18:41:57.407881 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: E0325 18:41:57.408668 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: E0325 18:41:57.408805 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:41:57 k8s-master-1 kubelet[2894775]: I0325 18:41:57.409204 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: W0325 18:41:58.737414 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:41:58 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.246096 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.248225 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.250021 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.252263 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.255567 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: I0325 18:41:59.301316 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:41:59 k8s-master-1 kubelet[2894775]: E0325 18:41:59.303507 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:42:01 k8s-master-1 kubelet[2894775]: I0325 18:42:01.245364 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:42:01 k8s-master-1 kubelet[2894775]: E0325 18:42:01.248778 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:42:01 k8s-master-1 kubelet[2894775]: E0325 18:42:01.351283 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:42:02 k8s-master-1 kubelet[2894775]: E0325 18:42:02.498888 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.092239 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.093821 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.096142 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.097706 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.099999 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: E0325 18:42:03.100123 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: W0325 18:42:03.549706 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:42:03 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: I0325 18:42:05.245310 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e" | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: E0325 18:42:05.247117 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: W0325 18:42:05.571552 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:42:05 k8s-master-1 kubelet[2894775]: E0325 18:42:05.917376 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:42:06 k8s-master-1 kubelet[2894775]: E0325 18:42:06.354347 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:42:07 k8s-master-1 kubelet[2894775]: E0325 18:42:07.444104 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache]"
Mar 25 18:42:07 k8s-master-1 kubelet[2894775]: E0325 18:42:07.445750 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:42:07 k8s-master-1 kubelet[2894775]: E0325 18:42:07.447556 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:42:07 k8s-master-1 kubelet[2894775]: E0325 18:42:07.447725 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:42:07 k8s-master-1 kubelet[2894775]: I0325 18:42:07.447876 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: I0325 18:42:09.245306 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: I0325 18:42:09.249764 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: I0325 18:42:09.251441 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: I0325 18:42:09.253258 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: I0325 18:42:09.254849 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:09 k8s-master-1 kubelet[2894775]: E0325 18:42:09.501117 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:11 k8s-master-1 kubelet[2894775]: I0325 18:42:11.300926 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:42:11 k8s-master-1 kubelet[2894775]: E0325 18:42:11.303340 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:42:11 k8s-master-1 kubelet[2894775]: I0325 18:42:11.312481 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:42:11 k8s-master-1 kubelet[2894775]: E0325 18:42:11.313662 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:42:11 k8s-master-1 kubelet[2894775]: E0325 18:42:11.357039 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.475763 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.480523 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.482551 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.484993 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.487335 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:13 k8s-master-1 kubelet[2894775]: E0325 18:42:13.487460 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:42:15 k8s-master-1 kubelet[2894775]: E0325 18:42:15.920525 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:16 k8s-master-1 kubelet[2894775]: I0325 18:42:16.296747 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:42:16 k8s-master-1 kubelet[2894775]: E0325 18:42:16.300891 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:42:16 k8s-master-1 kubelet[2894775]: E0325 18:42:16.359179 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:16 k8s-master-1 kubelet[2894775]: E0325 18:42:16.504494 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:17 k8s-master-1 kubelet[2894775]: E0325 18:42:17.491720 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:42:17 k8s-master-1 kubelet[2894775]: E0325 18:42:17.496112 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:42:17 k8s-master-1 kubelet[2894775]: E0325 18:42:17.496343 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:42:17 k8s-master-1 kubelet[2894775]: E0325 18:42:17.496469 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:42:17 k8s-master-1 kubelet[2894775]: I0325 18:42:17.496576 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: W0325 18:42:19.116983 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: I0325 18:42:19.245597 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: I0325 18:42:19.247682 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: I0325 18:42:19.249729 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: I0325 18:42:19.251861 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:19 k8s-master-1 kubelet[2894775]: I0325 18:42:19.254234 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: I0325 18:42:20.244636 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: E0325 18:42:20.247198 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: W0325 18:42:20.356978 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:20 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: E0325 18:42:21.361466 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: W0325 18:42:21.380526 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:21 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: I0325 18:42:23.247029 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.249430 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: I0325 18:42:23.308740 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.310863 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.506766 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.630063 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.631559 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.633214 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.634734 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.638023 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.638208 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: W0325 18:42:23.761552 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: E0325 18:42:23.761825 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: W0325 18:42:23.997129 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:23 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:25 k8s-master-1 kubelet[2894775]: E0325 18:42:25.926073 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:26 k8s-master-1 kubelet[2894775]: E0325 18:42:26.365038 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: E0325 18:42:27.542431 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache]"
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: E0325 18:42:27.544402 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: W0325 18:42:27.546466 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: E0325 18:42:27.546815 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: E0325 18:42:27.546977 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:42:27 k8s-master-1 kubelet[2894775]: I0325 18:42:27.547082 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: I0325 18:42:29.245869 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: I0325 18:42:29.248022 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: I0325 18:42:29.250169 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: I0325 18:42:29.252357 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: I0325 18:42:29.254195 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: E0325 18:42:29.703950 2894775 projected.go:199] Error preparing data for projected volume kube-api-access-twh7b for pod kube-system/kube-proxy-fgm87: failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:29 k8s-master-1 kubelet[2894775]: E0325 18:42:29.704526 2894775 nestedpendingoperations.go:335] Operation for "{volumeName:kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b podName:95ab2fd8-d229-475f-b404-59e5ad925195 nodeName:}" failed. No retries permitted until 2022-03-25 18:44:31.704334047 -0500 CDT m=+165038.094023102 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-twh7b" (UniqueName: "kubernetes.io/projected/95ab2fd8-d229-475f-b404-59e5ad925195-kube-api-access-twh7b") pod "kube-proxy-fgm87" (UID: "95ab2fd8-d229-475f-b404-59e5ad925195") : failed to fetch token: Post "https://192.168.1.194:6443/api/v1/namespaces/kube-system/serviceaccounts/kube-proxy/token": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:30 k8s-master-1 kubelet[2894775]: E0325 18:42:30.509635 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:30 k8s-master-1 kubelet[2894775]: W0325 18:42:30.589594 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:30 k8s-master-1 kubelet[2894775]: E0325 18:42:30.589924 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:31 k8s-master-1 kubelet[2894775]: I0325 18:42:31.316531 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:42:31 k8s-master-1 kubelet[2894775]: E0325 18:42:31.319124 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:42:31 k8s-master-1 kubelet[2894775]: E0325 18:42:31.366644 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.942311 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.943673 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.945335 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.947039 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.948668 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:33 k8s-master-1 kubelet[2894775]: E0325 18:42:33.948796 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: I0325 18:42:34.245538 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: I0325 18:42:34.245832 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: E0325 18:42:34.247629 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: E0325 18:42:34.247772 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: I0325 18:42:34.310009 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:42:34 k8s-master-1 kubelet[2894775]: E0325 18:42:34.311861 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:42:35 k8s-master-1 kubelet[2894775]: E0325 18:42:35.929232 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:36 k8s-master-1 kubelet[2894775]: E0325 18:42:36.368867 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: E0325 18:42:37.512735 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: E0325 18:42:37.588970 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: E0325 18:42:37.593027 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: E0325 18:42:37.593217 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: E0325 18:42:37.593321 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:42:37 k8s-master-1 kubelet[2894775]: I0325 18:42:37.593426 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:38.960311 2894775 scope.go:110] "RemoveContainer" containerID="c657821326c36f46446a2fef0708e87817cb0eae8affed9aaff0f9896873ff26" | |
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.254391 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.255878 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.257390 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.258639 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.259988 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.967054 2894775 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="be8ee9627388e1a5513fc5ab398169c81610fd78e5e1a79c7055894145a3e9e8"
Mar 25 18:42:39 k8s-master-1 kubelet[2894775]: I0325 18:42:39.969187 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: E0325 18:42:40.184075 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: W0325 18:42:40.211390 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: I0325 18:42:40.975614 2894775 scope.go:110] "RemoveContainer" containerID="5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6"
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: I0325 18:42:40.976319 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:40 k8s-master-1 kubelet[2894775]: E0325 18:42:40.976950 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:42:41 k8s-master-1 kubelet[2894775]: E0325 18:42:41.371099 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:41 k8s-master-1 kubelet[2894775]: I0325 18:42:41.978137 2894775 scope.go:110] "RemoveContainer" containerID="5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6"
Mar 25 18:42:41 k8s-master-1 kubelet[2894775]: E0325 18:42:41.979417 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: W0325 18:42:43.273129 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:43 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.000456 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.002677 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.005251 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.007278 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.008597 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.008691 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:42:44 k8s-master-1 kubelet[2894775]: E0325 18:42:44.515026 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:45 k8s-master-1 kubelet[2894775]: E0325 18:42:45.932562 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16df2e53ff7044a4", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5774", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 55, 26, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:7465, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/etcd-k8s-master-1.16df2e53ff7044a4": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:45 k8s-master-1 kubelet[2894775]: E0325 18:42:45.935763 2894775 event.go:221] Unable to write event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-k8s-master-1.16dfc40d1d65f452", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-k8s-master-1", UID:"05adb7cc8b7ee5eaafe9a21465fbf19c", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 247730770, time.Local), Count:1, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}' (retry limit exceeded!)
Mar 25 18:42:45 k8s-master-1 kubelet[2894775]: E0325 18:42:45.938622 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:46 k8s-master-1 kubelet[2894775]: I0325 18:42:46.245154 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:42:46 k8s-master-1 kubelet[2894775]: I0325 18:42:46.245617 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:42:46 k8s-master-1 kubelet[2894775]: E0325 18:42:46.247442 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:42:46 k8s-master-1 kubelet[2894775]: E0325 18:42:46.248959 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:42:46 k8s-master-1 kubelet[2894775]: E0325 18:42:46.373713 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:47 k8s-master-1 kubelet[2894775]: E0325 18:42:47.627668 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:42:47 k8s-master-1 kubelet[2894775]: E0325 18:42:47.628257 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:42:47 k8s-master-1 kubelet[2894775]: E0325 18:42:47.629707 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:42:47 k8s-master-1 kubelet[2894775]: E0325 18:42:47.629820 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:42:47 k8s-master-1 kubelet[2894775]: I0325 18:42:47.629910 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:42:48 k8s-master-1 kubelet[2894775]: I0325 18:42:48.402482 2894775 scope.go:110] "RemoveContainer" containerID="5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6"
Mar 25 18:42:48 k8s-master-1 kubelet[2894775]: E0325 18:42:48.405704 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:42:48 k8s-master-1 kubelet[2894775]: E0325 18:42:48.738554 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.244599 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.244632 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.246402 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: E0325 18:42:49.246696 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.249931 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.253024 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.255287 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.305941 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: E0325 18:42:49.307790 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: I0325 18:42:49.747401 2894775 scope.go:110] "RemoveContainer" containerID="5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6"
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: E0325 18:42:49.749951 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-k8s-master-1_kube-system(737409118e728d54b637db0904da4725)\"" pod="kube-system/kube-controller-manager-k8s-master-1" podUID=737409118e728d54b637db0904da4725
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: W0325 18:42:49.841455 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:49 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:51 k8s-master-1 kubelet[2894775]: E0325 18:42:51.375790 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:51 k8s-master-1 kubelet[2894775]: E0325 18:42:51.518640 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.246323 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.248444 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.250202 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.251647 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.253104 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:54 k8s-master-1 kubelet[2894775]: E0325 18:42:54.253211 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:42:55 k8s-master-1 kubelet[2894775]: W0325 18:42:55.004126 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:55 k8s-master-1 kubelet[2894775]: E0325 18:42:55.004411 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:56 k8s-master-1 kubelet[2894775]: E0325 18:42:56.377759 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: I0325 18:42:57.245500 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd"
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: E0325 18:42:57.247722 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: W0325 18:42:57.348264 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: E0325 18:42:57.668575 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache]"
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: E0325 18:42:57.671305 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice"
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: E0325 18:42:57.671531 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service"
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: E0325 18:42:57.671662 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available
Mar 25 18:42:57 k8s-master-1 kubelet[2894775]: I0325 18:42:57.671775 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available
Mar 25 18:42:58 k8s-master-1 kubelet[2894775]: E0325 18:42:58.521129 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:42:58 k8s-master-1 kubelet[2894775]: E0325 18:42:58.741681 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping)
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: W0325 18:42:59.060484 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: I0325 18:42:59.245405 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: I0325 18:42:59.246999 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: I0325 18:42:59.248588 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: I0325 18:42:59.250102 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: I0325 18:42:59.251472 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: W0325 18:42:59.816431 2894775 container.go:590] Failed to update stats for container "/system.slice/kubelet.service": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:42:59 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: I0325 18:43:00.245625 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f"
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: I0325 18:43:00.245905 2894775 scope.go:110] "RemoveContainer" containerID="5f20619e1f124ba0cc9fdc3902df486344044dc32b537ab44b1d4160682f4fd6"
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: E0325 18:43:00.249913 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: W0325 18:43:00.876693 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:6aaf46bad5103e3654d15c59def32d4437dae89957977adc23c00f1530844914": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:43:00 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:43:01 k8s-master-1 kubelet[2894775]: I0325 18:43:01.075245 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:01 k8s-master-1 kubelet[2894775]: I0325 18:43:01.306276 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62"
Mar 25 18:43:01 k8s-master-1 kubelet[2894775]: E0325 18:43:01.307077 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195
Mar 25 18:43:01 k8s-master-1 kubelet[2894775]: E0325 18:43:01.379524 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:43:03 k8s-master-1 kubelet[2894775]: I0325 18:43:03.289545 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e"
Mar 25 18:43:03 k8s-master-1 kubelet[2894775]: E0325 18:43:03.290640 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.270973 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.271787 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.272621 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.273395 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.274121 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused"
Mar 25 18:43:04 k8s-master-1 kubelet[2894775]: E0325 18:43:04.274174 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count"
Mar 25 18:43:05 k8s-master-1 kubelet[2894775]: E0325 18:43:05.523380 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: W0325 18:43:06.144531 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: runtime.goexit
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats
Mar 25 18:43:06 k8s-master-1 kubelet[2894775]: E0325 18:43:06.382345 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
Mar 25 18:43:07 k8s-master-1 kubelet[2894775]: E0325 18:43:07.698532 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:6aaf46bad5103e3654d15c59def32d4437dae89957977adc23c00f1530844914\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache]"
Mar 25 18:43:07 k8s-master-1 kubelet[2894775]: E0325 18:43:07.700856 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:43:07 k8s-master-1 kubelet[2894775]: E0325 18:43:07.701797 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:43:07 k8s-master-1 kubelet[2894775]: E0325 18:43:07.701878 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:43:07 k8s-master-1 kubelet[2894775]: I0325 18:43:07.701927 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: W0325 18:43:08.078714 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: E0325 18:43:08.743527 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: W0325 18:43:08.981617 2894775 reflector.go:324] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:08 k8s-master-1 kubelet[2894775]: E0325 18:43:08.981781 2894775 reflector.go:138] object-"kube-system"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get "https://192.168.1.194:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:09 k8s-master-1 kubelet[2894775]: I0325 18:43:09.244149 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:09 k8s-master-1 kubelet[2894775]: I0325 18:43:09.246151 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:09 k8s-master-1 kubelet[2894775]: I0325 18:43:09.247098 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:09 k8s-master-1 kubelet[2894775]: I0325 18:43:09.248069 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:09 k8s-master-1 kubelet[2894775]: I0325 18:43:09.249076 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:10 k8s-master-1 kubelet[2894775]: I0325 18:43:10.267211 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:43:10 k8s-master-1 kubelet[2894775]: E0325 18:43:10.269134 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:43:11 k8s-master-1 kubelet[2894775]: E0325 18:43:11.384831 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:43:12 k8s-master-1 kubelet[2894775]: E0325 18:43:12.525611 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:13 k8s-master-1 kubelet[2894775]: I0325 18:43:13.245575 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:43:13 k8s-master-1 kubelet[2894775]: E0325 18:43:13.249056 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.395186 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.396811 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.398378 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.400379 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.402719 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:14 k8s-master-1 kubelet[2894775]: E0325 18:43:14.402830 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:43:16 k8s-master-1 kubelet[2894775]: I0325 18:43:16.302268 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:43:16 k8s-master-1 kubelet[2894775]: E0325 18:43:16.305732 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:43:16 k8s-master-1 kubelet[2894775]: E0325 18:43:16.387477 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: I0325 18:43:17.317720 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e" | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: E0325 18:43:17.321661 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: E0325 18:43:17.742218 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:6aaf46bad5103e3654d15c59def32d4437dae89957977adc23c00f1530844914\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: E0325 18:43:17.744347 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: E0325 18:43:17.746141 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: E0325 18:43:17.746293 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:43:17 k8s-master-1 kubelet[2894775]: I0325 18:43:17.746384 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: I0325 18:43:18.442842 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: I0325 18:43:18.444343 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: E0325 18:43:18.745505 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: W0325 18:43:18.843888 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:18 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: I0325 18:43:19.244823 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: I0325 18:43:19.246769 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: I0325 18:43:19.248695 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: I0325 18:43:19.251644 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: I0325 18:43:19.253506 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:19 k8s-master-1 kubelet[2894775]: E0325 18:43:19.528400 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:21 k8s-master-1 kubelet[2894775]: I0325 18:43:21.245438 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:43:21 k8s-master-1 kubelet[2894775]: E0325 18:43:21.247670 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:43:21 k8s-master-1 kubelet[2894775]: E0325 18:43:21.389159 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.500514 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.505660 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.507247 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.509571 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.511459 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:24 k8s-master-1 kubelet[2894775]: E0325 18:43:24.511589 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
Mar 25 18:43:25 k8s-master-1 kubelet[2894775]: I0325 18:43:25.245496 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:43:25 k8s-master-1 kubelet[2894775]: E0325 18:43:25.249283 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: E0325 18:43:26.391636 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: E0325 18:43:26.533635 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: W0325 18:43:26.608315 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:26 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: I0325 18:43:27.318522 2894775 scope.go:110] "RemoveContainer" containerID="81dc2526e13d0614ac073735f738a4708a847dbd1170959d75b4e220750d0c62" | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: E0325 18:43:27.319650 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-proxy pod=kube-proxy-fgm87_kube-system(95ab2fd8-d229-475f-b404-59e5ad925195)\"" pod="kube-system/kube-proxy-fgm87" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: E0325 18:43:27.785550 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:6aaf46bad5103e3654d15c59def32d4437dae89957977adc23c00f1530844914\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: W0325 18:43:27.789388 2894775 container.go:590] Failed to update stats for container "/kubepods.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: E0325 18:43:27.789697 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: E0325 18:43:27.789862 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: E0325 18:43:27.789975 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:43:27 k8s-master-1 kubelet[2894775]: I0325 18:43:27.790064 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:43:28 k8s-master-1 kubelet[2894775]: E0325 18:43:28.748657 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:43:29 k8s-master-1 kubelet[2894775]: I0325 18:43:29.245303 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:29 k8s-master-1 kubelet[2894775]: I0325 18:43:29.247265 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:29 k8s-master-1 kubelet[2894775]: I0325 18:43:29.249585 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:29 k8s-master-1 kubelet[2894775]: I0325 18:43:29.252648 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:29 k8s-master-1 kubelet[2894775]: I0325 18:43:29.254486 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: W0325 18:43:30.669608 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:30 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:31 k8s-master-1 kubelet[2894775]: E0325 18:43:31.394188 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: W0325 18:43:32.267440 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: I0325 18:43:32.304411 2894775 scope.go:110] "RemoveContainer" containerID="e6e461b9c6f2cc1c82ad91752c22bfc1d90f9b792925f881d6a46a66e006e32e" | |
Mar 25 18:43:32 k8s-master-1 kubelet[2894775]: E0325 18:43:32.306603 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-scheduler pod=kube-scheduler-k8s-master-1_kube-system(b3c13021b6a6369c1aec32ee08be09d3)\"" pod="kube-system/kube-scheduler-k8s-master-1" podUID=b3c13021b6a6369c1aec32ee08be09d3 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: W0325 18:43:33.149319 2894775 container.go:590] Failed to update stats for container "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw.(*rawContainerHandler).GetStats | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/raw/handler.go:233 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).updateStats | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:641 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeepingTick | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:587 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).housekeeping | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager/container.go:535 | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: runtime.goexit | |
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: /usr/local/go/src/runtime/asm_arm64.s:1133], continuing to push stats | |
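The trace above repeats on every cAdvisor housekeeping tick: runc's cgroup v2 code (fs2.statHugeTlb) discovers the supported hugepage sizes by listing /sys/kernel/mm/hugepages, and on this arm64 node that directory does not exist, which usually means the kernel was built without HugeTLB support. The error aborts the whole GetStats call, which appears to be why cAdvisor's memory cache has no recent data in the "RecentStats" errors further down. A minimal Go sketch of the same probe (illustrative only, not the vendored runc code):

// Probe the path that fails in the log above. On a kernel without
// HugeTLB support the directory is absent and ReadDir returns the
// same "no such file or directory" error the kubelet keeps logging.
package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir("/sys/kernel/mm/hugepages")
	if err != nil {
		fmt.Println("hugetlb unsupported:", err)
		return
	}
	for _, e := range entries {
		fmt.Println("hugepage size dir:", e.Name()) // e.g. hugepages-2048kB
	}
}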
Mar 25 18:43:33 k8s-master-1 kubelet[2894775]: E0325 18:43:33.535691 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.903445 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?resourceVersion=0&timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.904935 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.906283 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.907638 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.908992 2894775 kubelet_node_status.go:460] "Error updating node status, will retry" err="error getting node \"k8s-master-1\": Get \"https://192.168.1.194:6443/api/v1/nodes/k8s-master-1?timeout=10s\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:34 k8s-master-1 kubelet[2894775]: E0325 18:43:34.909089 2894775 kubelet_node_status.go:447] "Unable to update node status" err="update node status exceeds retry count" | |
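Five consecutive "Error updating node status, will retry" lines followed by "update node status exceeds retry count" match the kubelet's fixed retry budget for node status updates; after exhausting it, the kubelet gives up until the next sync period. The root cause is the same throughout this section: nothing is listening on 192.168.1.194:6443 because kube-apiserver is itself in CrashLoopBackOff. A TCP-level reachability check equivalent to what keeps failing (a sketch, address taken from the log):

// Dial the apiserver endpoint the kubelet is trying to reach. With the
// apiserver down this fails exactly as logged: connect: connection refused.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "192.168.1.194:6443", 2*time.Second)
	if err != nil {
		fmt.Println("apiserver unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("apiserver port is accepting connections")
}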
Mar 25 18:43:35 k8s-master-1 kubelet[2894775]: I0325 18:43:35.244965 2894775 scope.go:110] "RemoveContainer" containerID="b1aaf2465fd520bd0f333b91af1bbffa27811672de4b933c50a756cdf57480fd" | |
Mar 25 18:43:35 k8s-master-1 kubelet[2894775]: E0325 18:43:35.247687 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"etcd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=etcd pod=etcd-k8s-master-1_kube-system(05adb7cc8b7ee5eaafe9a21465fbf19c)\"" pod="kube-system/etcd-k8s-master-1" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c | |
Mar 25 18:43:36 k8s-master-1 kubelet[2894775]: E0325 18:43:36.396071 2894775 kubelet.go:2347] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized" | |
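"cni plugin not initialized" means the container runtime has no CNI network configuration loaded, so the node will also report NotReady on its network condition. Assuming the stock config location (/etc/cni/net.d; a customized node may use a different --cni-conf-dir), a sketch of the corresponding check:

// List the default CNI config directory. An error or an empty directory
// would explain the "NetworkPluginNotReady" condition in the log above.
package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir("/etc/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	if len(entries) == 0 {
		fmt.Println("CNI config dir is empty; plugin stays uninitialized")
		return
	}
	for _, e := range entries {
		fmt.Println("CNI config:", e.Name())
	}
}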
Mar 25 18:43:37 k8s-master-1 kubelet[2894775]: E0325 18:43:37.832756 2894775 cadvisor_stats_provider.go:414] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice:cri-containerd:7a1bea032cb828c5ada6ff67e6d42b7e09bfb8ce927f393891996b3f77672d4f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podfe84e60149ccca0a4ab306df6e279110.slice:cri-containerd:2b4e030bb8cc0e9d7af48533a1b25472c890206607fedae7be0eb2ebab1e469d\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice:cri-containerd:50cb2d62db5451a910be86944217551751b347e4e84543db2bc2623f8a4827a0\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:6aaf46bad5103e3654d15c59def32d4437dae89957977adc23c00f1530844914\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice:cri-containerd:300c8b244704733949d9fb283670ea83aeac964a076855c11123df68fea0715b\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ab2fd8_d229_475f_b404_59e5ad925195.slice\": RecentStats: unable to find data in memory cache], [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3c13021b6a6369c1aec32ee08be09d3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05adb7cc8b7ee5eaafe9a21465fbf19c.slice\": RecentStats: unable to find data in memory cache]" | |
Mar 25 18:43:37 k8s-master-1 kubelet[2894775]: E0325 18:43:37.834679 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": partial failures: [\"/system.slice/kubelet.service\": RecentStats: unable to find data in memory cache]" containerName="/system.slice/kubelet.service" | |
Mar 25 18:43:37 k8s-master-1 kubelet[2894775]: E0325 18:43:37.836965 2894775 summary_sys_containers.go:48] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods.slice\": failed to get container info for \"/kubepods.slice\": partial failures: [\"/kubepods.slice\": RecentStats: unable to find data in memory cache]" containerName="/kubepods.slice" | |
Mar 25 18:43:37 k8s-master-1 kubelet[2894775]: E0325 18:43:37.837145 2894775 helpers.go:673] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal=allocatableMemory.available | |
Mar 25 18:43:37 k8s-master-1 kubelet[2894775]: I0325 18:43:37.837255 2894775 helpers.go:746] "Eviction manager: no observation found for eviction signal" signal=allocatableMemory.available | |
Mar 25 18:43:38 k8s-master-1 kubelet[2894775]: I0325 18:43:38.245127 2894775 scope.go:110] "RemoveContainer" containerID="abaffb6a7805e7a7b6564ae4a4a0cd10a7bf2203567077a84bb1876d5a5ed55f" | |
Mar 25 18:43:38 k8s-master-1 kubelet[2894775]: E0325 18:43:38.251606 2894775 pod_workers.go:919] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver pod=kube-apiserver-k8s-master-1_kube-system(fe84e60149ccca0a4ab306df6e279110)\"" pod="kube-system/kube-apiserver-k8s-master-1" podUID=fe84e60149ccca0a4ab306df6e279110 | |
Mar 25 18:43:38 k8s-master-1 kubelet[2894775]: E0325 18:43:38.752332 2894775 event.go:276] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy-fgm87.16df2e6064513bdf", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"5766", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-proxy-fgm87", UID:"95ab2fd8-d229-475f-b404-59e5ad925195", APIVersion:"v1", ResourceVersion:"469", FieldPath:"spec.containers{kube-proxy}"}, Reason:"BackOff", Message:"Back-off restarting failed container", Source:v1.EventSource{Component:"kubelet", Host:"k8s-master-1"}, FirstTimestamp:time.Date(2022, time.March, 23, 20, 56, 20, 0, time.Local), LastTimestamp:time.Date(2022, time.March, 25, 18, 39, 9, 310210529, time.Local), Count:8511, Type:"Warning", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Patch "https://192.168.1.194:6443/api/v1/namespaces/kube-system/events/kube-proxy-fgm87.16df2e6064513bdf": dial tcp 192.168.1.194:6443: connect: connection refused'(may retry after sleeping) | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: I0325 18:43:39.244885 2894775 status_manager.go:604] "Failed to get status for pod" podUID=b3c13021b6a6369c1aec32ee08be09d3 pod="kube-system/kube-scheduler-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: I0325 18:43:39.246594 2894775 status_manager.go:604] "Failed to get status for pod" podUID=737409118e728d54b637db0904da4725 pod="kube-system/kube-controller-manager-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: I0325 18:43:39.248387 2894775 status_manager.go:604] "Failed to get status for pod" podUID=05adb7cc8b7ee5eaafe9a21465fbf19c pod="kube-system/etcd-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: I0325 18:43:39.250213 2894775 status_manager.go:604] "Failed to get status for pod" podUID=95ab2fd8-d229-475f-b404-59e5ad925195 pod="kube-system/kube-proxy-fgm87" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-proxy-fgm87\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: I0325 18:43:39.252081 2894775 status_manager.go:604] "Failed to get status for pod" podUID=fe84e60149ccca0a4ab306df6e279110 pod="kube-system/kube-apiserver-k8s-master-1" err="Get \"https://192.168.1.194:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-k8s-master-1\": dial tcp 192.168.1.194:6443: connect: connection refused" | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: W0325 18:43:39.265589 2894775 reflector.go:324] k8s.io/client-go/informers/factory.go:134: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:39 k8s-master-1 kubelet[2894775]: E0325 18:43:39.267540 2894775 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://192.168.1.194:6443/apis/storage.k8s.io/v1/csidrivers?resourceVersion=5691": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:40 k8s-master-1 kubelet[2894775]: E0325 18:43:40.538676 2894775 controller.go:144] failed to ensure lease exists, will retry in 7s, error: Get "https://192.168.1.194:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/k8s-master-1?timeout=10s": dial tcp 192.168.1.194:6443: connect: connection refused | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: W0325 18:43:41.214025 2894775 container.go:590] Failed to update stats for container "/system.slice/kubepods-burstable-pod737409118e728d54b637db0904da4725.slice:cri-containerd:371fea832103aaf9816eaadfe599a93e3f242def50557acdb93a3d60481838a9": error while statting cgroup v2: [open /sys/kernel/mm/hugepages: no such file or directory | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: failed to fetch hugetlb info | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.statHugeTlb | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go:35 | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2.(*manager).GetStats | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go:123 | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer.(*Handler).GetStats | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/libcontainer/handler.go:86 | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd.(*containerdContainerHandler).GetStats | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/containerd/handler.go:198 | |
Mar 25 18:43:41 k8s-master-1 kubelet[2894775]: k8s.io/kubernetes/vendor/github.com/google/cadvisor/manager.(*containerData).upd |