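## Values for the Bitnami NGINX Helm chart.
## Usage sketch (the release name "my-release" is illustrative):
##   helm install my-release bitnami/nginx -f values.yaml
##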
## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
## Bitnami NGINX image version
## ref: https://hub.docker.com/r/bitnami/nginx/tags/
##
image:
  registry: docker.io
  repository: bitnami/nginx
  tag: 1.19.10-debian-10-r35
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: 'Always'
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## E.g.:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information in the logs
  ##
  debug: false
## String to partially override nginx.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override nginx.fullname template
##
# fullnameOverride:
## Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion:
## Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
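## E.g., to add entries to the pods' /etc/hosts (IP and hostname below are illustrative):
## hostAliases:
##   - ip: "127.0.0.1"
##     hostnames:
##       - "foo.local"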
## Kubernetes Cluster Domain
##
clusterDomain: cluster.local
## Extra objects to deploy (value evaluated as a template)
##
extraDeploy: []
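## E.g., to deploy an extra ConfigMap with the release (name and data are illustrative):
## extraDeploy:
##   - apiVersion: v1
##     kind: ConfigMap
##     metadata:
##       name: extra-configmap
##     data:
##       foo: "bar"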
## Add labels to all the deployed resources
##
commonLabels: {}
## Add annotations to all the deployed resources
##
commonAnnotations: {}
## Command and args for running the container (set to default if not set). Use array form
##
# command:
# args:
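## E.g., to override the container entrypoint (values are illustrative):
## command: ['nginx']
## args: ['-g', 'daemon off;']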
## Additional environment variables to set
## E.g.:
## extraEnvVars:
##   - name: FOO
##     value: BAR
##
extraEnvVars: []
## ConfigMap with extra environment variables
##
# extraEnvVarsCM:
## Secret with extra environment variables
##
# extraEnvVarsSecret:
## Get the server static content from a git repository
## NOTE: This will override staticSiteConfigmap and staticSitePVC
##
cloneStaticSiteFromGit:
  enabled: false
  ## Bitnami Git image version
  ## ref: https://hub.docker.com/r/bitnami/git/tags/
  ##
  image:
    registry: docker.io
    repository: bitnami/git
    tag: 2.31.1-debian-10-r52
    ## Specify an imagePullPolicy
    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  ## Repository to clone static content from
  ##
  # repository:
  ## Branch inside the git repository
  ##
  # branch:
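  ## E.g. (repository URL and branch are illustrative):
  ## repository: https://github.com/my-org/my-static-site.git
  ## branch: main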
  ## Interval (in seconds) for the sidecar container to pull from the repository
  ##
  interval: 60
  ## Additional configuration for the git-clone-repository initContainer
  ##
  gitClone:
    ## Override container command
    ##
    command: []
    ## Override container args
    ##
    args: []
  ## Additional configuration for the git-repo-syncer container
  ##
  gitSync:
    ## Override container command
    ##
    command: []
    ## Override container args
    ##
    args: []
  ## Additional environment variables to set in the containers that clone the static site from git
  ## E.g.:
  ## extraEnvVars:
  ##   - name: FOO
  ##     value: BAR
  ##
  extraEnvVars: []
  ## Add extra volume mounts for the git containers
  ## Useful for mounting keys to connect via SSH (normally used with extraVolumes)
  ## E.g.:
  ## extraVolumeMounts:
  ##   - name: ssh-dir
  ##     mountPath: /root/.ssh/
  ##
  extraVolumeMounts: []
## Custom server block to be added to NGINX configuration
## PHP-FPM example server block:
## serverBlock: |-
##   server {
##     listen 0.0.0.0:8080;
##     root /app;
##     location / {
##       index index.html index.php;
##     }
##     location ~ \.php$ {
##       fastcgi_pass phpfpm-server:9000;
##       fastcgi_index index.php;
##       include fastcgi.conf;
##     }
##   }
##
# serverBlock:
## ConfigMap with custom server block to be added to NGINX configuration
## NOTE: This will override serverBlock
##
# existingServerBlockConfigmap:
## Name of existing ConfigMap with the server static site content
##
# staticSiteConfigmap:
## Name of existing PVC with the server static site content
## NOTE: This will override staticSiteConfigmap
##
# staticSitePVC:
## Number of replicas to deploy
##
replicaCount: 3
## Pod extra labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Pod affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAffinityPreset: ""
## Pod anti-affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
  ## Node affinity type
  ## Allowed values: soft, hard
  ##
  type: ""
  ## Node label key to match
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## Node label values to match
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when affinity is set
##
affinity: {}
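## E.g., to require scheduling on nodes carrying a given label (key and values are illustrative):
## affinity:
##   nodeAffinity:
##     requiredDuringSchedulingIgnoredDuringExecution:
##       nodeSelectorTerms:
##         - matchExpressions:
##             - key: disktype
##               operator: In
##               values:
##                 - ssd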
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
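## E.g. (label key and value are illustrative):
## nodeSelector:
##   disktype: ssd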
## Tolerations for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
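## E.g., to tolerate a custom taint (key, value, and effect are illustrative):
## tolerations:
##   - key: "dedicated"
##     operator: "Equal"
##     value: "nginx"
##     effect: "NoSchedule"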
## NGINX pods' Security Context.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
podSecurityContext:
  enabled: false
  fsGroup: 1001
  ## sysctl settings
  ## Example:
  ## sysctls:
  ##   - name: net.core.somaxconn
  ##     value: "10000"
  ##
  sysctls: []
## NGINX Core containers' Security Context (only main container).
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
containerSecurityContext:
  enabled: false
  runAsUser: 1001
  runAsNonRoot: true
## Configures the ports NGINX listens on
##
containerPorts:
  http: 8080
  # https: 8443
## NGINX containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
  #   cpu: 100m
  #   memory: 128Mi
  requests: {}
  #   cpu: 100m
  #   memory: 128Mi
## NGINX containers' liveness and readiness probes.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 30
  timeoutSeconds: 5
  periodSeconds: 10
  failureThreshold: 6
  successThreshold: 1
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  timeoutSeconds: 3
  periodSeconds: 5
  failureThreshold: 3
  successThreshold: 1
## Custom Liveness probe
##
customLivenessProbe: {}
## Custom Readiness probe
##
customReadinessProbe: {}
## Autoscaling parameters
##
autoscaling:
  enabled: false
  # minReplicas: 1
  # maxReplicas: 10
  # targetCPU: 50
  # targetMemory: 50
## Array to add extra volumes (evaluated as a template)
##
extraVolumes: []
## Array to add extra mounts (normally used with extraVolumes, evaluated as a template)
##
extraVolumeMounts: []
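## E.g., to mount an existing ConfigMap into the pod (names and paths are illustrative):
## extraVolumes:
##   - name: extra-content
##     configMap:
##       name: my-extra-configmap
## extraVolumeMounts:
##   - name: extra-content
##     mountPath: /opt/extra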
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: false
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the `common.names.fullname` template
  ##
  # name:
  ## Annotations for the service account. Evaluated as a template.
  ## Only used if `create` is `true`.
  ##
  annotations: {}
## NGINX Service properties
##
service:
  ## Service type
  ##
  type: LoadBalancer
  ## HTTP Port
  ##
  port: 80
  ## HTTPS Port
  ##
  httpsPort: 443
  ## Specify the nodePort(s) value(s) for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  nodePorts:
    http: ""
    https: ""
  ## Target port reference value for the LoadBalancer service types can be specified explicitly.
  ## Listeners for the LoadBalancer can be custom mapped to the http or https service.
  ## Example: Mapping the https listener to targetPort http [http: https]
  ##
  targetPort:
    http: http
    https: https
  ## Set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  # loadBalancerIP:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
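  ## E.g., to request an internal load balancer on AWS (the annotation is cloud-provider specific and shown only as an illustration):
  ## annotations:
  ##   service.beta.kubernetes.io/aws-load-balancer-internal: "true"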
  ## Enable client source IP preservation
  ## ref: http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
## LDAP Auth Daemon Properties
##
## Daemon that will proxy LDAP requests
## between NGINX and a given LDAP Server
##
ldapDaemon:
  enabled: false
  ## Bitnami NGINX LDAP Auth Daemon image
  ## ref: https://hub.docker.com/r/bitnami/nginx-ldap-auth-daemon/tags/
  ##
  image:
    registry: docker.io
    repository: bitnami/nginx-ldap-auth-daemon
    tag: 0.20200116.0-debian-10-r348
    pullPolicy: IfNotPresent
  ## LDAP Daemon port
  ##
  port: 8888
  ## LDAP Auth Daemon Configuration
  ##
  ## These different properties define the form of requests performed
  ## against the given LDAP server
  ##
  ## BEWARE THAT THESE VALUES WILL BE IGNORED IF A CUSTOM LDAP SERVER BLOCK
  ## ALREADY SPECIFIES THEM.
  ##
  ldapConfig:
    ## LDAP URI where to query the server
    ## Must follow the pattern -> ldap[s]://<hostname>:<port>
    ##
    uri: ""
    ## LDAP search base DN
    ##
    baseDN: ""
    ## LDAP bind DN
    ##
    bindDN: ""
    ## LDAP bind password
    ##
    bindPassword: ""
    ## LDAP search filter
    ##
    filter: ""
    ## LDAP auth realm
    ##
    httpRealm: ""
    ## LDAP cookie name
    ##
    httpCookieName: ""
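  ## E.g. (illustrative values, mirroring the commented placeholders in nginxServerBlock below):
  ## ldapConfig:
  ##   uri: "ldap://YOUR_LDAP_SERVER_IP:YOUR_LDAP_SERVER_PORT"
  ##   baseDN: "dc=example,dc=org"
  ##   bindDN: "cn=admin,dc=example,dc=org"
  ##   bindPassword: "adminpassword"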
  ## NGINX configuration file containing the directives that define
  ## how LDAP requests are performed, and telling NGINX to use the LDAP Daemon
  ## as a proxy. It also defines the routes that will require LDAP auth
  ## in order to be accessed.
  ##
  ## If LDAP directives are provided, they will take precedence over
  ## the ones specified in ldapConfig.
  ##
  ## This will be evaluated as a template.
  ##
  nginxServerBlock: |-
    server {
        listen 0.0.0.0:{{ .Values.containerPorts.http }};

        # You can provide a special subPath or the root
        location = / {
            auth_request /auth-proxy;
        }

        location = /auth-proxy {
            internal;

            proxy_pass http://127.0.0.1:{{ .Values.ldapDaemon.port }};

            ###############################################################
            # YOU SHOULD CHANGE THE FOLLOWING TO YOUR LDAP CONFIGURATION  #
            ###############################################################

            # URL and port for connecting to the LDAP server
            # proxy_set_header X-Ldap-URL "ldap://YOUR_LDAP_SERVER_IP:YOUR_LDAP_SERVER_PORT";

            # Base DN
            # proxy_set_header X-Ldap-BaseDN "dc=example,dc=org";

            # Bind DN
            # proxy_set_header X-Ldap-BindDN "cn=admin,dc=example,dc=org";

            # Bind password
            # proxy_set_header X-Ldap-BindPass "adminpassword";
        }
    }
  ## Use an existing Secret holding an NGINX configuration file that
  ## configures LDAP requests (will be evaluated as a template).
  ##
  ## If provided, both nginxServerBlock and ldapConfig properties are ignored.
  ##
  existingNginxServerBlockSecret:
  ## LDAP Auth Daemon containers' liveness and readiness probes.
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 30
    timeoutSeconds: 5
    periodSeconds: 10
    failureThreshold: 6
    successThreshold: 1
  readinessProbe:
    enabled: true
    initialDelaySeconds: 5
    timeoutSeconds: 3
    periodSeconds: 5
    failureThreshold: 3
    successThreshold: 1
  ## Custom Liveness probe
  ##
  customLivenessProbe: {}
  ## Custom Readiness probe
  ##
  customReadinessProbe: {}
## Sidecar parameters
##
# sidecars:
#   - name: your-image-name
#     image: your-image
#     imagePullPolicy: Always
#     ports:
#       - name: portname
#         containerPort: 1234
sidecars:
## Extra init containers
##
initContainers:
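## E.g. (illustrative):
## initContainers:
##   - name: your-init-container
##     image: your-image
##     command: ['sh', '-c', 'echo "init done"']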
## Configure the ingress resource that allows you to access the
## Nginx installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
  ## Set to true to enable ingress record generation
  ##
  enabled: false
  ## Set this to true in order to add the corresponding annotations for cert-manager
  ##
  certManager: false
  ## Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## Override API Version (automatically detected if not set)
  ##
  apiVersion:
  ## When the ingress is enabled, a host pointing to this will be created
  ##
  hostname: nginx.local
  ## The Path to Nginx. You may need to set this to '/*' in order to use this
  ## with ALB ingress controllers.
  ##
  path: /
  ## Ingress annotations done as key:value pairs
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ##
  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
  ##
  annotations: {}
  ## Enable TLS configuration for the hostname defined at ingress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
  ##
  tls: false
  ## The list of additional hostnames to be covered with this ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## extraHosts:
  ##   - name: nginx.local
  ##     path: /
  ##
  ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
  ## extraPaths:
  ##   - path: /*
  ##     backend:
  ##       serviceName: ssl-redirect
  ##       servicePort: use-annotation
  ##
  ## The TLS configuration for additional hostnames to be covered with this ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## extraTls:
  ##   - hosts:
  ##       - nginx.local
  ##     secretName: nginx.local-tls
  ##
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ##
  secrets: []
  ## E.g.:
  ## secrets:
  ##   - name: nginx.local-tls
  ##     key:
  ##     certificate:
  ##
## Health Ingress parameters
##
healthIngress:
  ## Set to true to enable health ingress record generation
  ##
  enabled: false
  ## Set this to true in order to add the corresponding annotations for cert-manager
  ##
  certManager: false
  ## Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## When the health ingress is enabled, a host pointing to this will be created
  ##
  hostname: example.local
  ## Health Ingress annotations done as key:value pairs
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ##
  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
  ##
  annotations: {}
  ## Enable TLS configuration for the hostname defined at healthIngress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.healthIngress.hostname }}
  ## You can use the healthIngress.secrets parameter to create this TLS secret, rely on cert-manager to create it, or
  ## let the chart create self-signed certificates for you
  ##
  tls: false
  ## The list of additional hostnames to be covered with this health ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## E.g.:
  ## extraHosts:
  ##   - name: example.local
  ##     path: /
  ##
  extraHosts: []
  ## The TLS configuration for additional hostnames to be covered with this health ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## E.g.:
  ## extraTls:
  ##   - hosts:
  ##       - example.local
  ##     secretName: example.local-tls
  ##
  extraTls: []
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or -----BEGIN RSA PRIVATE KEY-----
  ## name should line up with a secretName set further up
  ## If it is not set and you're using cert-manager, this is unneeded, as it will create the secret for you
  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ##
  ## E.g.:
  ## secrets:
  ##   - name: example.local-tls
  ##     key:
  ##     certificate:
  ##
  secrets: []
## Prometheus Exporter / Metrics
##
metrics:
  enabled: true
  ## Bitnami NGINX Prometheus Exporter image
  ## ref: https://hub.docker.com/r/bitnami/nginx-exporter/tags/
  ##
  image:
    registry: docker.io
    repository: bitnami/nginx-exporter
    tag: 0.9.0-debian-10-r56
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  ## Prometheus exporter pods' annotations and labels
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## Prometheus exporter service parameters
  ##
  service:
    ## NGINX Prometheus exporter port
    ##
    port: 9113
    ## Annotations for the Prometheus exporter service
    ##
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "{{ .Values.metrics.service.port }}"
  ## NGINX Prometheus exporter resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    limits: {}
    #   cpu: 100m
    #   memory: 128Mi
    requests: {}
    #   cpu: 100m
    #   memory: 128Mi
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    enabled: false
    ## Namespace in which Prometheus is running
    ##
    # namespace: monitoring
    ## Interval at which metrics should be scraped.
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # interval: 10s
    ## Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # scrapeTimeout: 10s
    ## ServiceMonitor selector labels
    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
    ##
    # selector:
    #   prometheus: my-prometheus
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
##
pdb:
  create: false
  ## Min number of pods that must still be available after the eviction
  ##
  minAvailable: 1
  ## Max number of pods that can be unavailable after the eviction
  ##
  # maxUnavailable: 1