Skip to content

Instantly share code, notes, and snippets.

@veysiertekin
Last active December 1, 2020 13:33
Show Gist options
  • Save veysiertekin/d50490cadfb70de59e4bd4569d0b0286 to your computer and use it in GitHub Desktop.
Kubernetes k8s

Add private nameserver to coredns

0- Default Coredns config

.:53 {
    # Log errors and expose health/readiness endpoints.
    errors
    health
    ready

    # Serve cluster-internal DNS for these zones.
    kubernetes cluster.local in-addr.arpa ip6.arpa {

      pods verified
      fallthrough in-addr.arpa ip6.arpa
    }
    autopath @kubernetes
    # Metrics endpoint on port 9153.
    prometheus :9153
    # Everything not matched above goes to the node's upstream resolvers.
    forward . /etc/resolv.conf
    cache 30
    loop
    reload
    loadbalance
}

1- Add following configuration to top of the kube-system->config-map->coredns->Corefile

<domain>:53 {
    errors
    cache 30
    forward . <name server>
}

Example :

topdomain.com:53 {
    errors
    cache 30
    forward . 8.8.8.8
}

2- Restart pods by deleting coredns pods from kube-system to reload configuration.

kubectl -n kube-system get pod
kubectl -n kube-system delete pod <pod_id>

3- Done!

Adding hosts definition to CoreDNS

<first host> <second host> {
    hosts {
        <first ip> <first host>
        <second ip> <second host>
        fallthrough
    }
    whoami
}

For example:

test.domain {
    hosts {
        127.0.0.1 test.domain
        fallthrough
    }
    whoami
}

test2.domain test3.domain {
    hosts {
        127.0.0.2 test2.domain
        127.0.0.3 test3.domain
        fallthrough
    }
    whoami
}
###########################
# View API fields         #
###########################
# List all API versions known to the cluster
kubectl api-versions
# List all API resources (kinds, short names, API groups)
kubectl api-resources
# View the documented fields of a resource
kubectl explain --api-version=<api version> <api resource>
##################
# Example        #
##################
kubectl explain --api-version=autoscaling/v1 HorizontalPodAutoscaler
# Detail of `spec` field
kubectl explain --api-version=autoscaling/v1 HorizontalPodAutoscaler.spec
# Detail of `scaleTargetRef` field in `spec` and so on...
kubectl explain --api-version=autoscaling/v1 HorizontalPodAutoscaler.spec.scaleTargetRef
##################
# Misc           #
##################
# Delete all pods in Pending state (pod name is column 1 of `kubectl get pods`)
kubectl get pods --no-headers=true | awk '/Pending/{print $1}'| xargs kubectl delete pod
# In plain example
kubectl apply -f file.yml
# Delete deployment
kubectl delete -f file.yml
# If you have a context & SSL issues
# (NOTE: --insecure-skip-tls-verify disables server certificate validation —
# only use against clusters you trust)
kubectl --context=<app_context> --insecure-skip-tls-verify=true -n <app_name> apply -f file.yml
{
  "kind": "PersistentVolumeClaim",
  "apiVersion": "v1",
  "metadata": {
    "name": "<claim-name>",
    "namespace": "default",
    "annotations": {
      "pv.kubernetes.io/bind-completed": "yes",
      "pv.kubernetes.io/bound-by-controller": "yes"
    },
    "finalizers": [
      "kubernetes.io/pvc-protection"
    ]
  },
  "spec": {
    "accessModes": [
      "ReadWriteOnce"
    ],
    "resources": {
      "requests": {
        "storage": "5Gi"
      }
    },
    "volumeName": "<volume-name>",
    "storageClassName": "manual"
  }
}
{
  "kind": "PersistentVolume",
  "apiVersion": "v1",
  "metadata": {
    "name": "<volume-name>",
    "labels": {
      "type": "local"
    },
    "annotations": {
      "pv.kubernetes.io/bound-by-controller": "yes"
    },
    "finalizers": [
      "kubernetes.io/pv-protection"
    ]
  },
  "spec": {
    "capacity": {
      "storage": "5Gi"
    },
    "hostPath": {
      "path": "/opt/<volume-name>",
      "type": ""
    },
    "accessModes": [
      "ReadWriteOnce"
    ],
    "persistentVolumeReclaimPolicy": "Retain",
    "storageClassName": "manual"
  }
}
# Create dashboard (deploys the kubernetes-dashboard manifests)
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
# Bind `https` — forward local port 8443 to the dashboard deployment
kubectl port-forward deployment/kubernetes-dashboard --insecure-skip-tls-verify=true -n kube-system 8443:8443
# View secret resources
kubectl -n kube-system get secret
# View access token (used to log in to the dashboard)
kubectl -n kube-system describe secret namespace-controller-token-<pod_id>
#
# For DNS debugging you need to install busybox
#
kubectl create -f https://k8s.io/examples/admin/dns/busybox.yaml
# Look up a service from inside the cluster
kubectl exec -ti busybox -- nslookup <service-name>.<namespace>
# Exec into the first pod matching an app label, selected by namespace and app name
NAMESPACE=<namespace> APP_NAME=<app_name> POD=$(kubectl -n $NAMESPACE get pod -l app=$APP_NAME -o jsonpath="{.items[0].metadata.name}") && kubectl exec -n $NAMESPACE -ti $POD -- ls -la
# Delete all pods in a namespace (pods owned by controllers are recreated)
kubectl --insecure-skip-tls-verify=true --context=<context_name> -n <name_of_the_app> delete --all pods
# View pods
kubectl --insecure-skip-tls-verify=true --context=<context_name> -n <name_of_the_app> get pods
kubectl get pods
# View pods scheduled on a specific node
kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=<node name>
# Print the name of the first pod of a kompose-generated service
kubectl --context=<app_context> --insecure-skip-tls-verify=true -n <app_name> get pod -l io.kompose.service=<service_name> -o jsonpath='{.items[0].metadata.name}'
---
# Minimal Deployment running the nginxdemos/hello demo image.
# NOTE: apps/v1beta1 was removed in Kubernetes 1.16; apps/v1 additionally
# requires spec.selector, which must match the pod template labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      run: hello-world
  template:
    metadata:
      labels:
        run: hello-world
    spec:
      containers:
        - name: hello
          image: nginxdemos/hello:latest
          ports:
            - containerPort: 80
---
# NodePort Service exposing the hello pods on port 80.
apiVersion: v1
kind: Service
metadata:
  name: hello-service
  labels:
    run: hello-world
spec:
  type: NodePort
  ports:
    - port: 80
      protocol: TCP
  selector:
    run: hello-world

Init single node cluster

As root:

# Configure the upstream Kubernetes yum repository.
# kube* packages are excluded by default so routine `yum update` cannot
# bump cluster components; they are installed explicitly below.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

# Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

# Make bridged traffic visible to iptables (required by CNI plugins such as flannel).
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

# Install and start the container runtime BEFORE enabling kubelet;
# otherwise kubelet crash-loops until a runtime becomes available.
yum install -y docker
systemctl enable --now docker

# Install the Kubernetes components (must bypass the repo's kube* exclude).
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# `enable --now` both enables at boot and starts the unit, so no separate
# daemon-reload/restart is needed here.
systemctl enable --now kubelet

# Initialize a single-node control plane. The pod CIDR matches the flannel
# manifest applied afterwards; --ignore-preflight-errors=NumCPU permits
# machines with fewer CPUs than the preflight check demands.
kubeadm init --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=NumCPU

As a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl -n kube-system apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml

Create dashboard

# Install dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml

# Create a NodePort
kubectl -n kube-system edit service  kubernetes-dashboard

Create a service account

service-account.yml

# ServiceAccount used to obtain a login token for the dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# Grant the account full access by binding it to the built-in
# cluster-admin ClusterRole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
kubectl apply -f service-account.yml

# Get account token: find the admin-user secret, then print its details (incl. token)
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
# Tail the logs of a deployment
kubectl --context=volt-context --insecure-skip-tls-verify=true -n <app_name> logs -f deployment/<service_name>
# Forward local port 8080 to a specific pod
kubectl port-forward <node>-8586b765d4-tpr9r --insecure-skip-tls-verify=true --context=app-context -n app 8080:8080
# or forward to the deployment (a pod is selected automatically)
kubectl port-forward deployment/<service_name> --insecure-skip-tls-verify=true --context=app-context -n app 8080:8080
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment