Skip to content

Instantly share code, notes, and snippets.

@mikejk8s
Last active January 26, 2018 23:10
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save mikejk8s/8df02b78d3ddeb3663162d3d15b17961 to your computer and use it in GitHub Desktop.
Save mikejk8s/8df02b78d3ddeb3663162d3d15b17961 to your computer and use it in GitHub Desktop.
0.10.0 nginx-ingress full RBAC
# LoadBalancer Service fronting the websocket nginx-ingress controller pods.
# Exposes HTTPS only; ClientIP affinity + Local traffic policy preserve the
# client source IP and keep websocket sessions pinned to one backend pod.
apiVersion: v1
kind: Service
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: "{{ .Values.websocket_ingress_name }}"
  namespace: "{{ .Values.my_namespace }}"
spec:
  # Quoted so an empty/odd expansion cannot be re-typed by the YAML parser.
  loadBalancerIP: "{{ .Values.websocket_ingress_ip }}"
  type: LoadBalancer
  ports:
    - port: 443
      name: https
  selector:
    # Matches the pod-template label set by the controller Deployment.
    name: "{{ .Values.websocket_ingress_name }}"
  sessionAffinity: ClientIP
  # Local keeps traffic on the node that received it and preserves client IP.
  externalTrafficPolicy: Local
---
# nginx-ingress-controller (0.10.0) Deployment dedicated to websocket traffic.
# Runs under its own ingress class so it does not compete with any cluster-wide
# controller, and under a dedicated ServiceAccount for the RBAC objects below.
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: "{{ .Values.websocket_ingress_name }}"
  namespace: "{{ .Values.my_namespace }}"
spec:
  replicas: 1
  selector:
    matchLabels:
      name: "{{ .Values.websocket_ingress_name }}"
  template:
    metadata:
      labels:
        name: "{{ .Values.websocket_ingress_name }}"
      annotations:
        # Controller exposes metrics on 10254; scraped by Prometheus.
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: websocket-ingress-serviceaccount
      initContainers:
        # Raise the kernel accept-queue and widen the ephemeral port range
        # before nginx starts; needs privileged to write sysctls.
        - name: sysctl
          image: alpine:3.6
          imagePullPolicy: IfNotPresent
          command:
            - sh
            - -c
            - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535"
          securityContext:
            privileged: true
      containers:
        - name: "{{ .Values.websocket_ingress_name }}"
          image: "{{ .Values.websocket_ingress_image }}"
          imagePullPolicy: Always
          args:
            - /nginx-ingress-controller
            - --ingress-class={{ .Values.websocket_ingress_name }}
            - --election-id={{ .Values.websocket_ingress_name }}
            # Controller redirects to the given service for any unknown host:
            # - Should serve a HTTP/404 on /
            # - Must serve a HTTP/200 on /healthz
            - --default-backend-service={{ .Values.my_namespace }}/{{ .Values.default_backend_name }}
            - --configmap=$(POD_NAMESPACE)/{{ .Values.websocket_ingress_name }}
            # 0.9.0 and up use the new ingress annotation prefix.
            - --annotations-prefix=ingress.kubernetes.io
          # Downward API: the controller needs its own pod name/namespace.
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - containerPort: 443
          livenessProbe:
            failureThreshold: 2
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 2
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 2
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            # Requests == limits gives the pod Guaranteed QoS.
            requests:
              memory: 512Mi
              cpu: 500m
            limits:
              memory: 512Mi
              cpu: 500m
      nodeSelector:
        # NOTE(review): "my_noed_pool" looks like a typo for "my_node_pool";
        # confirm against values.yaml before renaming — the values file may
        # intentionally carry the same key.
        cloud.google.com/gke-nodepool: "{{ .Values.my_noed_pool }}"
---
# Ingress routing the websocket hostname to the websocket backend service,
# handled exclusively by the controller Deployment above (via ingress.class)
# and TLS-terminated with a cert-manager issued certificate.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: "{{ .Values.websocket_ingress_name }}"
  namespace: "{{ .Values.my_namespace }}"
  annotations:
    # This tells to only use the Nginx Ingress Controller
    # and avoids the creation of a global LoadBalancer on GKE.
    kubernetes.io/ingress.class: "{{ .Values.websocket_ingress_name }}"
    nginx.ingress.kubernetes.io/affinity: cookie
    nginx.ingress.kubernetes.io/upstream-fail-timeout: "10"
    nginx.ingress.kubernetes.io/upstream-max-fails: "2"
    # cert-manager: issue the TLS certificate via the configured
    # cluster issuer using a DNS-01 ACME challenge.
    certmanager.k8s.io/cluster-issuer: "{{ .Values.ssl.cluster_issuer_cf }}"
    certmanager.k8s.io/acme-challenge-type: "{{ .Values.ssl.acme_challenge_cf }}"
    certmanager.k8s.io/acme-dns01-provider: "{{ .Values.ssl.acme_provider_cf }}"
    kubernetes.io/tls-acme: "true"
spec:
  tls:
    - secretName: websocket-ingress-tls
      # List of hosts supported by this certificate:
      hosts:
        - "{{ .Values.websocket_name }}.mydomain.com"
  rules:
    - host: "{{ .Values.websocket_name }}.mydomain.com"
      http:
        paths:
          - path: /
            backend:
              serviceName: "{{ .Values.websocket_name }}"
              servicePort: 10002
---
# nginx configuration ConfigMap consumed by the controller via --configmap.
# NOTE: the original manifest declared `labels:` twice under metadata, which
# silently dropped the first label set (YAML last-wins); merged into one map.
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: "{{ .Values.websocket_ingress_name }}"
  namespace: "{{ .Values.my_namespace }}"
data:
  enable-sticky-sessions: "true"  # use ROUTE cookie to provide session affinity
  enable-vts-status: "true"       # replaces default status page with nginx-module-vts
  use-http2: "true"
  load-balance: "ip_hash"
  worker-process: "1"
  keep-alive: "650"
---
# CPU-based autoscaling for the controller Deployment (1..5 replicas @ 50%).
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: "{{ .Values.websocket_ingress_hpa_name }}"
  namespace: "{{ .Values.my_namespace }}"
spec:
  scaleTargetRef:
    # apiVersion pinned to match the Deployment manifest above.
    apiVersion: apps/v1beta2
    kind: Deployment
    name: "{{ .Values.websocket_ingress_name }}"
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 50
---
# ServiceAccount the controller pods run as (Deployment.spec.serviceAccountName).
# FIX: the original referenced `.Values.my_namespacel` (trailing "l" typo),
# which rendered an empty namespace and placed the ServiceAccount outside the
# namespace every other document in this file uses.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: websocket-ingress-serviceaccount
  namespace: "{{ .Values.my_namespace }}"
---
# Namespaced Role granting the controller the minimum it needs inside its own
# namespace: read config/pods/secrets, own its leader-election ConfigMap, and
# read endpoints for upstream resolution.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: websocket-ingress-role
  namespace: "{{ .Values.my_namespace }}"
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Leader-election ConfigMap defaults to "<election-id>-<ingress-class>".
      # Both flags are set to websocket_ingress_name in the Deployment, hence
      # the doubled value. Adapt if either flag changes.
      # - "ingress-controller-leader-{{ .Values.websocket_ingress_name }}"
      - "{{ .Values.websocket_ingress_name }}-{{ .Values.websocket_ingress_name }}"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
---
# Binds the namespaced Role above to the controller's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: websocket-ingress-role-nisa-binding
  namespace: "{{ .Values.my_namespace }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: websocket-ingress-role
subjects:
  - kind: ServiceAccount
    name: websocket-ingress-serviceaccount
    namespace: "{{ .Values.my_namespace }}"
---
# Binds the cluster-wide nginx-ingress ClusterRole (watch ingresses, nodes,
# services, etc.) to the controller's ServiceAccount.
# NOTE(review): "nginx-ingress-clusterrole" is not defined in this file —
# confirm it is installed elsewhere (e.g. by the upstream nginx-ingress chart).
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    name: "{{ .Values.websocket_ingress_name }}"
    app: {{ template "fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  name: websocket-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: websocket-ingress-serviceaccount
    namespace: "{{ .Values.my_namespace }}"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment