## values.yml for the bitnami/kafka Helm chart
## Global Docker image parameters
## Please note that this will override the image parameters of any dependencies configured to use the global value.
## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
#   storageClass: myStorageClass
## Bitnami Kafka image version
## ref: https://hub.docker.com/r/bitnami/kafka/tags/
##
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 2.4.1-debian-10-r8
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
  ## Set to true if you would like to see extra information in the logs
  ## It turns on BASH and NAMI debugging in minideb
  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
  debug: false
## String to partially override kafka.fullname template (will maintain the release name)
# nameOverride:
## String to fully override kafka.fullname template
# fullnameOverride:
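## For example (illustrative name, not a chart default), to force all resource
## names to "my-kafka" regardless of the release name:
# fullnameOverride: my-kafka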
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
  enabled: false
  image:
    registry: docker.io
    repository: bitnami/minideb
    tag: buster
    pullPolicy: Always
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  resources: {}
## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
##
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
##
# rollingUpdatePartition:
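## For example (hypothetical staged rollout): during a RollingUpdate, only pods
## with an ordinal greater than or equal to the partition are updated, so the
## setting below would leave kafka-0 and kafka-1 on the old revision:
# rollingUpdatePartition: 2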
## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions
## The PDB will only be created if replicaCount is greater than 1
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
##
podDisruptionBudget:
  maxUnavailable: 1
replicaCount: 3
config: |-
  # listeners=SASL_SSL://:9092, SSL://:9093
  # advertised.listeners=SASL_SSL://:9092, SSL://:9093
  # zookeeper.connect=zookeeper:2181
  # broker.id=-1
  # num.network.threads=3
  # num.io.threads=8
  # socket.send.buffer.bytes=102400
  # socket.receive.buffer.bytes=102400
  # socket.request.max.bytes=104857600
  # log.dirs=/bitnami/kafka/data
  # num.partitions=1
  # num.recovery.threads.per.data.dir=1
  # offsets.topic.replication.factor=1
  # transaction.state.log.replication.factor=1
  # transaction.state.log.min.isr=1
  # log.flush.interval.messages=10000
  # log.flush.interval.ms=1000
  # log.retention.hours=168
  # log.retention.bytes=1073741824
  # log.segment.bytes=1073741824
  # log.retention.check.interval.ms=300000
  # zookeeper.connection.timeout.ms=6000
  # group.initial.rebalance.delay.ms=0
## Kafka docker image available customizations
## https://github.com/bitnami/bitnami-docker-kafka#configuration
##
## Allow the use of the PLAINTEXT listener.
allowPlaintextListener: false
## The address the socket server listens on.
# listeners:
## Hostname and port the broker will advertise to producers and consumers.
# advertisedListeners:
## The protocol->listener mapping
# listenerSecurityProtocolMap:
## The listener that the brokers should communicate on
# interBrokerListenerName:
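## For example, mirroring the commented broker config above (illustrative values,
## not chart defaults):
# listeners: SASL_SSL://:9092,SSL://:9093
# advertisedListeners: SASL_SSL://:9092,SSL://:9093
# listenerSecurityProtocolMap: SASL_SSL:SASL_SSL,SSL:SSL
# interBrokerListenerName: SSL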
## ID of the Kafka node.
brokerId: -1
## Switch to enable topic deletion or not.
deleteTopicEnable: false
## Kafka's Java Heap size.
heapOpts: -Xmx1024m -Xms1024m
## The number of messages to accept before forcing a flush of data to disk.
logFlushIntervalMessages: 10000
## The maximum amount of time a message can sit in a log before we force a flush.
logFlushIntervalMs: 1000
## A size-based retention policy for logs.
logRetentionBytes: _1073741824
## The interval at which log segments are checked to see if they can be deleted.
logRetentionCheckIntervalMs: 300000
## The minimum age of a log file to be eligible for deletion due to age.
logRetentionHours: 168
## The maximum size of a log segment file. When this size is reached a new log segment will be created.
logSegmentBytes: _1073741824
## Log message format version
logMessageFormatVersion: ""
## A comma separated list of directories under which to store log files.
logsDirs: /bitnami/kafka/data
## The largest record batch size allowed by Kafka
maxMessageBytes: _1000012
## Default replication factors for automatically created topics
defaultReplicationFactor: 3
## The replication factor for the offsets topic
offsetsTopicReplicationFactor: 3
## The replication factor for the transaction topic
transactionStateLogReplicationFactor: 3
## Overridden min.insync.replicas config for the transaction topic
transactionStateLogMinIsr: 3
## The number of threads doing disk I/O.
numIoThreads: 8
## The number of threads handling network requests.
numNetworkThreads: 3
## The default number of log partitions per topic.
numPartitions: 1
## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
numRecoveryThreadsPerDataDir: 1
## The receive buffer (SO_RCVBUF) used by the socket server.
socketReceiveBufferBytes: 102400
## The maximum size of a request that the socket server will accept (protection against OOM).
socketRequestMaxBytes: _104857600
## The send buffer (SO_SNDBUF) used by the socket server.
socketSendBufferBytes: 102400
## Timeout in ms for connecting to zookeeper.
zookeeperConnectionTimeoutMs: 6000
## The endpoint identification algorithm to validate server hostname using server certificate.
sslEndpointIdentificationAlgorithm: https
## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY}
##
# extraEnvVars:
#   - name: KAFKA_CFG_BACKGROUND_THREADS
#     value: "10"
## Authentication parameters
## https://github.com/bitnami/bitnami-docker-kafka#security
##
auth:
  ## Switch to enable Kafka authentication.
  enabled: true
  ## Enable SSL to be used with brokers and consumers
  # ssl: true
  ## Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser.
  # existingSecret:
  ## Name of the existing secret containing the certificate files that will be used by Kafka.
  certificatesSecret: kafka-certificates
  ## Password for the above certificates, if they are password protected.
  certificatesPassword: xxxx
  ## Kafka client user.
  brokerUser: xxxx
  ## Kafka client password.
  brokerPassword: xxxx
  ## Kafka inter-broker communication user.
  interBrokerUser: admin
  ## Kafka inter-broker communication password.
  interBrokerPassword: xxxx
  ## Kafka Zookeeper user.
  zookeeperUser: xxxx
  ## Kafka Zookeeper password.
  zookeeperPassword: xxxx
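  ## Note: the secret referenced by certificatesSecret is expected to contain JKS
  ## keystore/truststore files (typically kafka.keystore.jks and kafka.truststore.jks);
  ## see the Bitnami image docs linked above for the exact requirements.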
## Kubernetes Security Context
## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001
# Cluster domain
clusterDomain: cluster.local
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
  type: ClusterIP
  port: 9092
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Use loadBalancerIP to request a specific static IP.
  # loadBalancerIP:
  ## Service annotations done as key:value pairs
  annotations: {}
externalAccess:
  enabled: true
  service:
    ## Type of service for external access. It can be LoadBalancer or NodePort.
    ##
    type: NodePort
    ## Port used when the service type is LoadBalancer
    ##
    # port: 19092
    ## Array of load balancer IPs, one per Kafka broker. Length must match replicaCount.
    ##
    # loadBalancerIP: []
    ## When the service type is NodePort, you can specify the domain used for the Kafka advertised listeners.
    ## If not specified, the container will try to get the Kubernetes node external IP using: 'curl -s https://ipinfo.io/ip'
    domain: mydomain.com
    ## Array of node ports, one per Kafka broker. Length must match replicaCount.
    ##
    nodePort: [31090, 31091, 31092]
    ## Service annotations done as key:value pairs
    annotations: {}
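    ## Worked example (assuming the chart maps node ports to brokers by pod ordinal):
    ## with the values above, kafka-0 would advertise mydomain.com:31090,
    ## kafka-1 mydomain.com:31091 and kafka-2 mydomain.com:31092.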
## Service account for Kafka to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the kafka.fullname template
  # name:
## Kafka data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
  enabled: true
  ## A manually managed Persistent Volume and Claim
  ## If defined, the PVC must be created manually before the volume will be bound
  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
  ##
  # existingClaim:
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
#   cpu: 200m
#   memory: 1Gi
# requests:
#   memory: 256Mi
#   cpu: 250m
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 2
  successThreshold: 1
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 6
  successThreshold: 1
## Prometheus Exporters / Metrics
##
metrics:
  ## Prometheus Kafka Exporter: exposes complementary metrics to the JMX Exporter
  kafka:
    enabled: true
    ## Bitnami Kafka exporter image
    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
    ##
    image:
      registry: docker.io
      repository: bitnami/kafka-exporter
      tag: 1.2.0-debian-10-r52
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets.
      ## Secrets must be manually created in the namespace.
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ##
      # pullSecrets:
      #   - myRegistryKeySecretName
    ## Interval at which Prometheus scrapes metrics. Note: only used by the Prometheus Operator
    interval: 10s
    ## Port kafka-exporter exposes for Prometheus to scrape metrics
    port: 9308
    ## Prometheus Kafka Exporter's resource requests and limits
    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases the chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'limits:' and 'requests:'.
      limits: {}
      #   cpu: 100m
      #   memory: 128Mi
      requests: {}
      #   cpu: 100m
      #   memory: 128Mi
    ## Annotations for the Kafka Exporter Prometheus metrics deployment
    ##
    annotations: {}
    ## Annotations for the Kafka Exporter pods
    ##
    podAnnotations: {}
    service:
      ## Kafka Exporter Service type
      ##
      type: ClusterIP
      ## Kafka Exporter Prometheus port
      ##
      port: 9308
      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
      ##
      nodePort: ""
      ## Set the LoadBalancer service type to internal only.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
      ##
      # loadBalancerIP:
      ## Set the Cluster IP to use
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      # clusterIP: None
      ## Annotations for the Kafka Exporter Prometheus metrics service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.kafka.port }}"
        prometheus.io/path: "/metrics"
  ## Prometheus JMX Exporter: exposes the majority of Kafka's metrics
  ##
  jmx:
    enabled: true
    ## Bitnami JMX exporter image
    ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
    ##
    image:
      registry: docker.io
      repository: bitnami/jmx-exporter
      tag: 0.12.0-debian-10-r51
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets.
      ## Secrets must be manually created in the namespace.
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ##
      # pullSecrets:
      #   - myRegistryKeySecretName
    ## Interval at which Prometheus scrapes metrics. Note: only used by the Prometheus Operator
    ##
    interval: 10s
    ## Port on which jmx-exporter exposes Prometheus-format metrics to scrape
    ##
    exporterPort: 5556
    ## Prometheus JMX Exporter's resource requests and limits
    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources:
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases the chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'limits:' and 'requests:'.
      limits: {}
      #   cpu: 100m
      #   memory: 128Mi
      requests: {}
      #   cpu: 100m
      #   memory: 128Mi
    service:
      ## JMX Exporter Service type
      ##
      type: ClusterIP
      ## JMX Exporter Prometheus port
      ##
      port: 5556
      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
      ##
      nodePort: ""
      ## Set the LoadBalancer service type to internal only.
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
      ##
      # loadBalancerIP:
      ## Set the Cluster IP to use
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      # clusterIP: None
      ## Annotations for the JMX Exporter Prometheus metrics service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.jmx.exporterPort }}"
        prometheus.io/path: "/"
    ## Credits to the incubator/kafka chart for the JMX configuration.
    ## https://github.com/helm/charts/tree/master/incubator/kafka
    ##
    ## Rules to apply to the Prometheus JMX Exporter. Note that while many stats have been cleaned
    ## up and exposed, there are still more to clean up and expose, and others will never be exposed.
    ## Kafka keeps many duplicates that can be derived easily. The ConfigMap in this chart cleans up
    ## the metrics it exposes to be in a Prometheus format, e.g. topic and broker are labels rather
    ## than part of the metric name. Improvements are gladly accepted and encouraged.
    ##
    configMap:
      ## Allows disabling the default ConfigMap. Note that a ConfigMap is still needed.
      ##
      enabled: true
      ## Allows setting values to generate the ConfigMap.
      ## To allow all metrics through (warning: this is extremely verbose), comment out
      ## `overrideConfig` below and set `whitelistObjectNames: []`.
      ##
      overrideConfig: {}
      # jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
      # lowercaseOutputName: true
      # lowercaseOutputLabelNames: true
      # ssl: false
      # rules:
      #   - pattern: ".*"
      ## If you would like to supply your own ConfigMap for JMX metrics, supply the name of that
      ## ConfigMap as an `overrideName` here.
      ##
      overrideName: ""
    ## Port on which JMX metrics are exposed in native JMX format (not Prometheus format)
    ##
    jmxPort: 5555
    ## JMX whitelist objects: can be set to control which JMX metrics are exposed. Only whitelisted
    ## values will be exposed via the JMX Exporter, and they must also be exposed via rules. To expose
    ## all metrics (warning: extremely verbose, and not formatted in a Prometheus style), (1) set
    ## `whitelistObjectNames: []` and (2) comment out `overrideConfig` above.
    ##
    whitelistObjectNames:
      - kafka.controller:*
      - kafka.server:*
      - java.lang:*
      - kafka.network:*
      - kafka.log:*
  ## Enable this if you're using https://github.com/coreos/prometheus-operator
  serviceMonitor:
    enabled: false
    ## Namespace in which Prometheus is running
    ##
    # namespace: monitoring
    ## Interval at which metrics should be scraped.
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # interval: 10s
    ## Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # scrapeTimeout: 10s
    ## ServiceMonitor selector labels
    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
    ##
    # selector:
    #   prometheus: my-prometheus
##
## Zookeeper chart configuration
##
## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
##
zookeeper:
  enabled: true
  auth:
    enabled: true
    serverUsers: xxxx
    serverPasswords: xxxx
    clientUser: xxxx
    clientPassword: xxxx
  metrics:
    enabled: true
externalZookeeper:
  ## This value is only used when zookeeper.enabled is set to false
  ## Server or list of external zookeeper servers to use.
  servers:
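  ## e.g. (hypothetical hosts):
  # servers: zk-0.example.com:2181,zk-1.example.com:2181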