CFN template: https://s3.amazonaws.com/aws-kubernetes-artifacts/lab-ide-vpc.template
aws s3 cp s3://aws-kubernetes-artifacts/lab-ide-build.sh . && \
chmod +x lab-ide-build.sh && \
. ./lab-ide-build.sh
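The create-cluster command below expects AWS_AVAILABILITY_ZONES to be set (the build script may set it for you). If it isn't, one way to derive it from your configured region:
export AWS_AVAILABILITY_ZONES="$(aws ec2 describe-availability-zones --query 'AvailabilityZones[].ZoneName' --output text | awk -v OFS="," '$1=$1')"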
kops create cluster \
--name example.cluster.k8s.local \
--zones $AWS_AVAILABILITY_ZONES \
--yes
kops validate cluster
kubectl get nodes --show-labels
kubectl config get-contexts
kubectl config current-context
Kubernetes objects covered: node, pod, deployment, resource requests and limits, readiness/liveness checks, services, secrets, jobs, cronjobs. (A requests/limits and probes sketch follows the pod manifest below.)
Pod manifest:
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    name: nginx-pod
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - containerPort: 80
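The concept list above also mentions resource requests/limits and readiness/liveness checks, which none of the manifests here show. A minimal sketch of how those fields attach to a container; the pod name and threshold values are illustrative, not from the original:
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod-checked    # hypothetical name for this sketch
spec:
  containers:
  - name: nginx
    image: nginx:latest
    resources:
      requests:              # the scheduler reserves this much for the pod
        cpu: 250m
        memory: 64Mi
      limits:                # the container is throttled/killed above this
        cpu: 500m
        memory: 128Mi
    readinessProbe:          # gate traffic until nginx answers
      httpGet:
        path: /
        port: 80
    livenessProbe:           # restart the container if it stops answering
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5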
Deployment manifest:
apiVersion: extensions/v1beta1
kind: Deployment                      # kubernetes object type
metadata:
  name: nginx-deployment             # deployment name
spec:
  replicas: 3                        # number of replicas
  template:
    metadata:
      labels:
        app: nginx                   # pod labels
    spec:
      containers:
      - name: nginx                  # container name
        image: nginx:1.12.1          # nginx image
        imagePullPolicy: IfNotPresent   # if the image exists locally, do not pull a new one
        ports:                       # container ports to expose
        - containerPort: 80
        - containerPort: 443
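Assuming the manifest is saved as deployment.yaml (filename illustrative), it can be created, scaled, and rolled like so:
kubectl create -f deployment.yaml
kubectl get deployment nginx-deployment
kubectl scale deployment nginx-deployment --replicas=5
kubectl set image deployment nginx-deployment nginx=nginx:1.13   # triggers a rolling update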
Echo server Deployment manifest:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: echo-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: echo-pod
    spec:
      containers:
      - name: echoheaders
        image: gcr.io/google_containers/echoserver:1.4
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
Service manifest:
apiVersion: v1
kind: Service
metadata:
  name: echo-service
spec:
  selector:
    app: echo-pod
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 8080
  type: LoadBalancer
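Because the service type is LoadBalancer, Kubernetes provisions an ELB; its DNS name appears under EXTERNAL-IP (it can take a few minutes to become resolvable):
kubectl get service echo-service -o wide
curl http://<elb-dns-name>    # echoserver replies with the request headers it received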
DaemonSet manifest:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: prometheus-daemonset
spec:
  template:
    metadata:
      labels:
        tier: monitoring
        name: prometheus-exporter
    spec:
      containers:
      - name: prometheus
        image: prom/node-exporter
        ports:
        - containerPort: 9100    # node-exporter serves metrics on 9100
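A DaemonSet schedules one pod per node, so the pod count should match kubectl get nodes:
kubectl get daemonset prometheus-daemonset
kubectl get pods -l tier=monitoring -o wide    # one pod per node; the NODE column differs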
Job manifest:
apiVersion: batch/v1
kind: Job
metadata:
  name: wait
spec:
  template:
    metadata:
      name: wait
    spec:
      containers:
      - name: wait
        image: ubuntu
        command: ["sleep", "20"]
      restartPolicy: Never
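Assuming the manifest is saved as job.yaml (filename illustrative):
kubectl create -f job.yaml
kubectl get job wait      # shows success after the 20-second sleep finishes
kubectl logs job/wait     # sleep prints nothing, but this form works for any job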
Parallel Job manifest:
apiVersion: batch/v1
kind: Job
metadata:
  name: wait
spec:
  completions: 6
  parallelism: 2
  template:
    metadata:
      name: wait
    spec:
      containers:
      - name: wait
        image: ubuntu
        command: ["sleep", "20"]
      restartPolicy: Never
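With completions: 6 and parallelism: 2, the job runs two pods at a time until six have finished, roughly a minute total for 20-second sleeps. Both manifests use the name wait, so delete the first job before creating this one (filename illustrative):
kubectl delete job wait
kubectl create -f parallel-job.yaml
kubectl get pods -w    # watch the pods start in pairs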
CronJob manifest:
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: hello-cronpod
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello World!
          restartPolicy: OnFailure
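The schedule fires every minute; after the first run you can inspect the pods it leaves behind:
kubectl get cronjob hello
kubectl get pods -l app=hello-cronpod
kubectl logs -l app=hello-cronpod    # prints the date and Hello World!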
Update history: kubectl rollout history deployment foo
Autoscale: kubectl autoscale deployment webapp --cpu-percent=10 --min=1 --max=10
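foo and webapp above are placeholder deployment names. A typical history-then-rollback flow, as a sketch (assumes a container named nginx):
kubectl set image deployment foo nginx=nginx:1.13    # creates a new revision
kubectl rollout history deployment foo
kubectl rollout undo deployment foo                  # back to the previous revision
kubectl rollout undo deployment foo --to-revision=1  # or to a specific revision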
curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
chmod +x kops-linux-amd64
sudo mv kops-linux-amd64 /usr/local/bin/kops
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh && \
chmod +x get_helm.sh && \
./get_helm.sh
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
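With tiller running, charts can be installed from the stable repository; for example (Helm 2 syntax, release and chart names illustrative):
helm repo update
helm search mysql
helm install --name my-db stable/mysql
helm ls    # list deployed releases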
Kops AWS permissions: The kops user will require the following IAM permissions to function properly:
AmazonEC2FullAccess AmazonRoute53FullAccess AmazonS3FullAccess IAMFullAccess AmazonVPCFullAccess
aws iam create-group --group-name kops
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess --group-name kops
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonRoute53FullAccess --group-name kops
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess --group-name kops
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/IAMFullAccess --group-name kops
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonVPCFullAccess --group-name kops
aws iam create-user --user-name kops
aws iam add-user-to-group --user-name kops --group-name kops
aws iam create-access-key --user-name kops
You should record the SecretAccessKey and AccessKeyID in the returned JSON output, and then use them below:
# configure the aws client to use your new IAM user
aws configure         # use your new access and secret key here
aws iam list-users    # you should see a list of all your IAM users here
Because "aws configure" doesn't export these vars for kops to use, we export them now export AWS_ACCESS_KEY_ID=$(aws configure get aws_access_key_id) export AWS_SECRET_ACCESS_KEY=$(aws configure get aws_secret_access_key)
For a gossip-based cluster, make sure the name ends with k8s.local
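kops also needs an S3 bucket for its state store. A minimal sketch of the environment for a gossip-based cluster (bucket name illustrative):
export NAME=example.cluster.k8s.local               # .k8s.local suffix => gossip; no Route53 zone needed
export KOPS_STATE_STORE=s3://my-kops-state-bucket   # bucket must already exist (aws s3 mb)
kops create cluster --name $NAME --zones $AWS_AVAILABILITY_ZONES --yes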