helm ls -a
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
nfs-media-server default 1 2020-07-08 15:21:46.0611715 -0400 EDT deployed nfs-server-provisioner-1.1.1 2.3.0
helm history nfs-media-server
REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
1 Wed Jul 8 15:21:46 2020 deployed nfs-server-provisioner-1.1.1 2.3.0 Install complete
kubectl describe pod nfs-media-server-nfs-server-provisioner-0
Name: nfs-media-server-nfs-server-provisioner-0
Namespace: default
Priority: 0
Node: aks-nodepool1-20892315-0/10.240.0.9
Start Time: Thu, 09 Jul 2020 18:20:17 -0400
Labels: app=nfs-server-provisioner
chart=nfs-server-provisioner-1.1.1
controller-revision-hash=nfs-media-server-nfs-server-provisioner-699b9689d6
heritage=Helm
release=nfs-media-server
statefulset.kubernetes.io/pod-name=nfs-media-server-nfs-server-provisioner-0
Annotations: <none>
Status: Running
IP: 10.244.0.19
IPs:
IP: 10.244.0.19
Controlled By: StatefulSet/nfs-media-server-nfs-server-provisioner
Containers:
nfs-server-provisioner:
Container ID: docker://2c2dd854cbaadf534d6bd59a02b5186a5bf90fe58d1fb3f42c6786f4d6355981
Image: quay.io/kubernetes_incubator/nfs-provisioner:v2.3.0
Image ID: docker-pullable://quay.io/kubernetes_incubator/nfs-provisioner@sha256:f402e6039b3c1e60bf6596d283f3c470ffb0a1e169ceb8ce825e3218cd66c050
Ports: 2049/TCP, 2049/UDP, 32803/TCP, 32803/UDP, 20048/TCP, 20048/UDP, 875/TCP, 875/UDP, 111/TCP, 111/UDP, 662/TCP, 662/UDP
Host Ports: 0/TCP, 0/UDP, 0/TCP, 0/UDP, 0/TCP, 0/UDP, 0/TCP, 0/UDP, 0/TCP, 0/UDP, 0/TCP, 0/UDP
Args:
-provisioner=cluster.local/nfs-media-server-nfs-server-provisioner
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 255
Started: Fri, 10 Jul 2020 03:40:58 -0400
Finished: Fri, 10 Jul 2020 03:41:03 -0400
Ready: False
Restart Count: 112
Environment:
POD_IP: (v1:status.podIP)
SERVICE_NAME: nfs-media-server-nfs-server-provisioner
POD_NAMESPACE: default (v1:metadata.namespace)
Mounts:
/export from data (rw)
/var/run/secrets/kubernetes.io/serviceaccount from nfs-media-server-nfs-server-provisioner-token-p4qlc (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
data:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: data-nfs-media-server-nfs-server-provisioner-0
ReadOnly: false
nfs-media-server-nfs-server-provisioner-token-p4qlc:
Type: Secret (a volume populated by a Secret)
SecretName: nfs-media-server-nfs-server-provisioner-token-p4qlc
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning BackOff 5m34s (x2496 over 9h) kubelet, aks-nodepool1-20892315-0 Back-off restarting failed container
Normal Pulled 31s (x112 over 9h) kubelet, aks-nodepool1-20892315-0 Container image "quay.io/kubernetes_incubator/nfs-provisioner:v2.3.0" already present on machine
kubectl get pod nfs-media-server-nfs-server-provisioner-0
NAME READY STATUS RESTARTS AGE
nfs-media-server-nfs-server-provisioner-0 0/1 CrashLoopBackOff 173 14h