Skip to content

Instantly share code, notes, and snippets.

@jmaitrehenry
Created March 5, 2019 16:23
Show Gist options
  • Save jmaitrehenry/0df76b69672fd8c08baef7e9da9c6b88 to your computer and use it in GitHub Desktop.
Save jmaitrehenry/0df76b69672fd8c08baef7e9da9c6b88 to your computer and use it in GitHub Desktop.
---
# ClusterRole granting Traefik read-only access to the resources it watches
# to build its routing table: core Services/Endpoints/Secrets and
# extensions/Ingresses.
# NOTE(review): rbac.authorization.k8s.io/v1beta1 is deprecated (removed in
# Kubernetes 1.22); migrate to rbac.authorization.k8s.io/v1 when the cluster
# version allows.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
# RoleBinding: grants the traefik-ingress-controller ClusterRole's
# permissions, scoped to the "production" namespace only, to the Traefik
# service account that lives in kube-system. Together with the sibling
# binding below this restricts Traefik to watching two namespaces.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: production
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system
---
# RoleBinding: same ClusterRole, scoped to the "jmaitrehenry" namespace,
# for the Traefik service account in kube-system.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: jmaitrehenry
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system
---
# Headless service (clusterIP: None) giving the etcd StatefulSet pods
# stable per-pod DNS names (etcd-0.etcd, etcd-1.etcd, ...) for peer
# discovery and client access.
apiVersion: v1
kind: Service
metadata:
  name: "etcd"
  namespace: kube-system
  annotations:
    # Create endpoints also if the related pod isn't ready — required so
    # peers can resolve each other during initial cluster bootstrap.
    # NOTE(review): this alpha annotation is deprecated; newer Kubernetes
    # uses the spec.publishNotReadyAddresses field instead.
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 2379
      name: client
    - port: 2380
      name: peer
  clusterIP: None
  selector:
    component: "etcd"
---
# Three-member etcd cluster used by Traefik as its shared KV configuration
# store (see the [etcd] section of the ConfigMap below).
# NOTE(review): apps/v1beta1 is deprecated (removed in Kubernetes 1.16);
# migrating to apps/v1 additionally requires a spec.selector matching the
# template labels.
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: "etcd"
  namespace: kube-system
  labels:
    component: "etcd"
spec:
  serviceName: "etcd"
  # changing replicas value will require a manual etcdctl member remove/add
  # command (remove before decreasing and add after increasing)
  replicas: 3
  template:
    metadata:
      name: "etcd"
      namespace: kube-system
      labels:
        component: "etcd"
    spec:
      containers:
        - name: "etcd"
          image: "quay.io/coreos/etcd:v3.2.3"
          ports:
            - containerPort: 2379
              name: client
            - containerPort: 2380
              name: peer
          env:
            # CLUSTER_SIZE must stay in sync with spec.replicas above — it
            # drives the --initial-cluster peer list built in the script.
            - name: CLUSTER_SIZE
              value: "3"
            - name: SET_NAME
              value: "etcd"
          volumeMounts:
            - name: etcd-data
              mountPath: /var/run/etcd
          command:
            - "/bin/sh"
            - "-ecx"
            - |
              IP=$(hostname -i)
              # there's no need to wait since it'll cause problems when
              # restarting an already initialized cluster if a pod cannot be
              # scheduled since its related endpoint (and so dns entry) won't be
              # created During initialization etcd will fail to resolve the name
              # and retry.
              #
              #for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
              #  while true; do
              #    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
              #    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
              #    sleep 1s
              #  done
              #done
              PEERS=""
              for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
              done
              # start etcd. If cluster is already initialized the `--initial-*` options will be ignored.
              exec etcd --name ${HOSTNAME} \
                --listen-peer-urls http://${IP}:2380 \
                --listen-client-urls http://${IP}:2379,http://127.0.0.1:2379 \
                --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
                --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
                --initial-cluster-token etcd-cluster-1 \
                --initial-cluster ${PEERS} \
                --initial-cluster-state new \
                --data-dir /var/run/etcd/default.etcd
  ## We are using dynamic pv provisioning using the "standard" storage class so
  ## this resource can be directly deployed without changes to minikube (since
  ## minikube defines this class for its minikube hostpath provisioner). In
  ## production define your own way to use pv claims.
  volumeClaimTemplates:
    - metadata:
        name: etcd-data
        namespace: kube-system
      spec:
        accessModes:
          - "ReadWriteOnce"
        storageClassName: managed-premium
        resources:
          requests:
            storage: 10Gi
---
# One-shot bootstrap Job: runs `traefik storeconfig` to upload the static
# configuration (traefik.toml below) into the etcd KV store so the DaemonSet
# instances can share it (ACME certs in particular).
# NOTE(review): metadata.name "boostrap" looks like a typo for "bootstrap";
# kept as-is to avoid breaking anything referencing the Job by name.
# NOTE(review): the untagged `traefik` image floats with :latest — consider
# pinning it to the same v1.7 tag the DaemonSet uses.
apiVersion: batch/v1
kind: Job
metadata:
  name: traefik-boostrap
  namespace: kube-system
spec:
  template:
    metadata:
      name: traefik-boostrap
    spec:
      containers:
        - image: traefik
          name: traefik-bootstrap
          args:
            - storeconfig
            - --configfile=/etc/traefik/traefik.toml
          volumeMounts:
            - name: traefik-config
              mountPath: /etc/traefik
      restartPolicy: Never
      volumes:
        - name: traefik-config
          configMap:
            # Was "traefik", which matches no ConfigMap in this manifest;
            # the DaemonSet mounts "traefik-v1.0.0" (the ConfigMap actually
            # defined below), so reference the same one here.
            name: traefik-v1.0.0
---
# ServiceAccount the Traefik DaemonSet pods run as; bound to the RBAC rules
# defined above. (A redundant duplicate `---` separator, which produced an
# empty null document, was removed here.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: jmaitrehenry/traefik:v1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --kubernetes.namespaces=jmaitrehenry,production
- --logLevel=DEBUG
- --debug
- --configFile=/etc/traefik/traefik.toml
volumeMounts:
- name: traefik-config
mountPath: /etc/traefik
volumes:
- name: traefik-config
configMap:
name: traefik-v1.0.0
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: http
- protocol: TCP
port: 443
name: https
- protocol: TCP
port: 8080
name: admin
type: LoadBalancer
---
apiVersion: v1
kind: ConfigMap
metadata:
name: traefik-v1.0.0
namespace: kube-system
data:
traefik.toml: |-
checkNewVersion = false
IdleTimeout = "180s"
MaxIdleConnsPerHost = 500
logLevel = "INFO"
defaultEntryPoints = ["http", "https"]
[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.http.redirect]
entryPoint = "https"
[entryPoints.https]
address = ":443"
[entryPoints.https.tls]
[retry]
attempts = 3
[api]
[etcd]
endpoint = "etcd:2379"
prefix = "/traefik100"
useAPIV3 = true
[acme]
email = "julien@kumojin.com"
storage = "traefik/acme/account"
entryPoint = "https"
OnHostRule = true
acmeLogging = true
#caServer = "https://acme-staging.api.letsencrypt.org/directory"
[acme.tlsChallenge]
[accessLog]
resolv.conf: |-
nameserver 10.3.0.10
search kube-system.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment