Opendistro Configuration (config.yaml goes in secret/opendistro-es-security-config; the opendistro-es Helm chart values.yaml follows it)
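# The security config below is loaded by the chart through the secret named in
# elasticsearch.securityConfig.configSecret further down. A hedged sketch of
# creating that secret (the config.yml key name is an assumption about the
# chart's securityconfig layout):
#   kubectl create secret generic opendistro-es-security-config \
#     --from-file=config.yml=config.yaml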
_meta:
type: "config"
config_version: 2
config:
# #internalProxies: '.*' # trust all internal proxies, regex pattern
# #remoteIpHeader: 'x-forwarded-for'
# ###### see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html for regex help
# ###### more information about XFF https://en.wikipedia.org/wiki/X-Forwarded-For
# ###### and here https://tools.ietf.org/html/rfc7239
# ###### and https://tomcat.apache.org/tomcat-8.0-doc/config/valve.html#Remote_IP_Valve
dynamic:
http:
anonymous_auth_enabled: false
xff:
enabled: false
internalProxies: '192\.168\.0\.10|192\.168\.0\.11' # regex pattern
authc:
basic_internal_auth_domain:
http_enabled: true
transport_enabled: true
order: 0
http_authenticator:
type: basic
challenge: false
authentication_backend:
type: internal
openid_auth_domain:
http_enabled: true
transport_enabled: true
order: 1
http_authenticator:
type: openid
challenge: false
config:
subject_key: preferred_username
roles_key: kibana_sso
openid_connect_url: "REDACTED"
authentication_backend:
type: noop
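# roles_key: kibana_sso above means the values of the token's kibana_sso claim
# become backend roles; a roles_mapping.yml entry (a separate file in the
# security config, not part of this config.yml) then maps them to security
# roles. A hedged sketch, assuming a hypothetical IdP group "kibana-admins"
# and the built-in all_access role:
#   all_access:
#     reserved: false
#     backend_roles:
#       - "kibana-admins"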
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
kibana:
enabled: true
image: darkdragn/opendistro-for-elasticsearch-kibana
imageTag: 1.10.1.2
## Specifies the image pull policy. Can be "Always" or "IfNotPresent" or "Never".
## Defaults to "Always".
imagePullPolicy: ""
replicas: 1
port: 5601
externalPort: 443
resources: {}
# limits:
# cpu: 2500m
# memory: 2Gi
# requests:
# cpu: 500m
# memory: 512Mi
readinessProbe: []
livenessProbe: []
elasticsearchAccount:
secret: ""
keyPassphrase:
enabled: false
extraEnvs: []
ssl:
kibana:
enabled: false
existingCertSecret:
existingCertSecretCertSubPath: kibana-crt.pem
existingCertSecretKeySubPath: kibana-key.pem
existingCertSecretRootCASubPath: kibana-root-ca.pem
elasticsearch:
enabled: false
existingCertSecret:
existingCertSecretCertSubPath: elk-rest-crt.pem
existingCertSecretKeySubPath: elk-rest-key.pem
existingCertSecretRootCASubPath: elk-rest-root-ca.pem
configDirectory: "/usr/share/kibana/config"
certsDirectory: "/usr/share/kibana/certs"
ingress:
## Set to true to enable ingress record generation
enabled: true
annotations:
traefik.ingress.kubernetes.io/router.tls.certresolver: myresolver
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- REDACTED
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
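## With the Traefik certresolver annotation above, TLS can also be enabled on
## the router itself (under annotations, alongside the certresolver) instead of
## listing secrets here; a hedged sketch, assuming the Traefik v2 Ingress provider:
# traefik.ingress.kubernetes.io/router.tls: "true"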
service:
type: ClusterIP
annotations: {}
config:
## Default Kibana configuration from kibana-docker.
server.name: kibana
server.host: "0"
#server.cors : true
#server.customResponseHeaders : { "Access-Control-Allow-Credentials" : "true" }
# Enable OpenID authentication
opendistro_security.auth.type: "openid"
opendistro_security.openid.connect_url: "REDACTED"
opendistro_security.openid.client_id: "kibana"
opendistro_security.openid.client_secret: "REDACTED"
opendistro_security.openid.base_redirect_url: "https://$REMOVED"
opendistro_security.cookie.secure: false
opendistro_security.cookie.password: ""
elasticsearch.hosts: https://opendistro-es-client-service:9200
elasticsearch.username: kibanaserver
elasticsearch.password: kibanaserver
elasticsearch.ssl.verificationMode: none
elasticsearch.requestHeadersWhitelist: ["Authorization", "securitytenant"]
opendistro_security.multitenancy.enabled: true
opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
opendistro_security.readonly_mode.roles: ["kibana_read_only"]
#newsfeed.enabled: false
#telemetry.optIn: false
#telemetry.enabled: false
## Replace with Elasticsearch DNS name picked during Service deployment
# elasticsearch.hosts: ${ELASTIC_URL}
# elasticsearch.requestTimeout: 360000
## Kibana TLS Config
# server.ssl.enabled: true
# server.ssl.key: /usr/share/kibana/certs/kibana-key.pem
# server.ssl.certificate: /usr/share/kibana/certs/kibana-crt.pem
# elasticsearch.ssl.certificateAuthorities: /usr/share/kibana/certs/kibana-root-ca.pem
# opendistro_security.cookie.secure: true
# opendistro_security.cookie.password: ${COOKIE_PASS}
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
affinity: {}
serviceAccount:
## Specifies whether a ServiceAccount should be created
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
global:
clusterName: elasticsearch
psp:
create: true
rbac:
enabled: true
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
# imagePullSecrets:
# - myRegistryKeySecretName
elasticsearch:
## Used when deploying a hot/warm architecture. Allows a second, aliased deployment to find the cluster.
## Defaults to {{ template opendistro-es.fullname }}-discovery.
discoveryOverride: ""
securityConfig:
enabled: true
path: "/usr/share/elasticsearch/plugins/opendistro_security/securityconfig"
actionGroupsSecret:
configSecret: opendistro-es-security-config
internalUsersSecret:
rolesSecret:
rolesMappingSecret:
tenantsSecret:
#The following option simplifies securityConfig by using a single secret that carries the respective files (config, internal users, roles, roles mapping, and tenants) instead of a separate secret for each; see the commented example after the data keys below.
#Note that this is an alternative to the secrets above and should not be used together with them.
config:
securityConfigSecret:
data: {}
# config.yml: |-
# internal_users.yml: |-
# roles.yml: |-
# rolesMapping.yml: |-
# tenants.yml: |-
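# A hedged sketch (hypothetical secret name) of building that single combined
# secret from local files instead of inlining them under data:
#   kubectl create secret generic opendistro-security-combined \
#     --from-file=config.yml --from-file=internal_users.yml --from-file=roles.yml \
#     --from-file=rolesMapping.yml --from-file=tenants.yml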
extraEnvs: []
extraInitContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
extraVolumes: []
# - name: extras
# emptyDir: {}
extraVolumeMounts: []
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
initContainer:
image: busybox
imageTag: 1.27.2
## Set optimal sysctls. This requires privilege. Can be disabled if
## the system has already been preconfigured.
sysctl:
enabled: true
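## If this is disabled, the worker nodes themselves must be preconfigured, e.g.
## (value assumed to match elasticsearch.maxMapCount further down):
##   sysctl -w vm.max_map_count=262144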
ssl:
## TLS is mandatory for the transport layer and cannot be disabled
transport:
existingCertSecret:
existingCertSecretCertSubPath: elk-transport-crt.pem
existingCertSecretKeySubPath: elk-transport-key.pem
existingCertSecretRootCASubPath: elk-transport-root-ca.pem
rest:
enabled: false
existingCertSecret:
existingCertSecretCertSubPath: elk-rest-crt.pem
existingCertSecretKeySubPath: elk-rest-key.pem
existingCertSecretRootCASubPath: elk-rest-root-ca.pem
admin:
enabled: false
existingCertSecret:
existingCertSecretCertSubPath: admin-crt.pem
existingCertSecretKeySubPath: admin-key.pem
existingCertSecretRootCASubPath: admin-root-ca.pem
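## The existingCertSecret values above reference a secret whose keys are
## expected to match the corresponding SubPath file names. A hedged sketch
## (hypothetical secret name, local PEM files assumed):
##   kubectl create secret generic elasticsearch-transport-certs \
##     --from-file=elk-transport-crt.pem --from-file=elk-transport-key.pem \
##     --from-file=elk-transport-root-ca.pem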
master:
enabled: true
replicas: 1
updateStrategy: "RollingUpdate"
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
## The subdirectory of the volume to mount, useful in dev environments
## and when sharing one PV across multiple services.
##
subPath: ""
## Open Distro master Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
resources: {}
# limits:
# cpu: 1
# memory: 1024Mi
# requests:
# cpu: 200m
# memory: 1024Mi
javaOpts: "-Xms512m -Xmx512m"
podDisruptionBudget:
enabled: false
minAvailable: 1
readinessProbe: []
livenessProbe:
tcpSocket:
port: transport
initialDelaySeconds: 60
periodSeconds: 10
nodeSelector: {}
tolerations: []
## Anti-affinity to disallow deploying client and master nodes on the same worker node
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# role: master
podAnnotations: {}
data:
enabled: true
replicas: 2
updateStrategy: "RollingUpdate"
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
## The subdirectory of the volume to mount, useful in dev environments
## and when sharing one PV across multiple services.
##
subPath: ""
## Open Distro data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
resources: {}
# limits:
# cpu: 1
# memory: 1024Mi
# requests:
# cpu: 200m
# memory: 1024Mi
javaOpts: "-Xms512m -Xmx512m"
podDisruptionBudget:
enabled: false
minAvailable: 1
readinessProbe: []
livenessProbe:
tcpSocket:
port: transport
initialDelaySeconds: 60
periodSeconds: 10
nodeSelector: {}
tolerations: []
## Anti-affinity to avoid co-locating data nodes on the same worker node
affinity: {}
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# role: data
podAnnotations: {}
client:
enabled: true
service:
type: ClusterIP
annotations: {}
# # Define the ELB backend protocol as HTTPS to allow connections to the Elasticsearch API
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https
# # ARN of ACM certificate registered to the deployed ELB for handling connections over TLS
# # ACM certificate should be issued to the DNS hostname defined earlier (elk.sec.example.com)
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-east-1:111222333444:certificate/c69f6022-b24f-43d9-b9c8-dfe288d9443d"
# service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
# service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true"
# service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60"
# service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
# # Annotation to create internal only ELB
# service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
replicas: 1
javaOpts: "-Xms512m -Xmx512m"
ingress:
## Set to true to enable ingress record generation
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 1
# memory: 1024Mi
# requests:
# cpu: 200m
# memory: 1024Mi
podDisruptionBudget:
enabled: false
minAvailable: 1
readinessProbe: []
livenessProbe:
tcpSocket:
port: transport
initialDelaySeconds: 60
periodSeconds: 10
nodeSelector: {}
tolerations: []
## Weighted anti-affinity to avoid deploying a client node on the same worker node as a master node
affinity: {}
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# role: client
podAnnotations: {}
config:
# cluster.name: "docker-cluster"
# ## Example Config
# opendistro_security.allow_unsafe_democertificates: false
# opendistro_security.allow_default_init_securityindex: true
# opendistro_security.audit.type: internal_elasticsearch
# opendistro_security.enable_snapshot_restore_privilege: true
# opendistro_security.check_snapshot_restore_write_privileges: true
# cluster.routing.allocation.disk.threshold_enabled: false
# opendistro_security.audit.config.disabled_rest_categories: NONE
# opendistro_security.audit.config.disabled_transport_categories: NONE
# node:
# master: opendistro-es-master-0
# data: opendistro-es-data-0
# name: opendistro-es-data-0
# ingest: opendistro-es-data-0
# max_local_storage_nodes: 1
# attr.box_type: hot
# processors: 4
# # network.host: ${NETWORK_HOST}
# thread_pool.bulk.queue_size: 800
# path:
# data: /usr/share/elasticsearch/data
# logs: /usr/share/elasticsearch/logs
# http:
# enabled: true
# compression: true
# discovery:
# zen:
# ping.unicast.hosts: opendistro-es-discovery
# minimum_master_nodes: 1
# # # TLS Configuration Transport Layer
# opendistro_security.ssl.transport.pemcert_filepath: elk-transport-crt.pem
# opendistro_security.ssl.transport.pemkey_filepath: elk-transport-key.pem
# opendistro_security.ssl.transport.pemtrustedcas_filepath: elk-transport-root-ca.pem
# opendistro_security.ssl.transport.enforce_hostname_verification: false
# # # TLS Configuration REST Layer
# opendistro_security.ssl.http.enabled: true
# opendistro_security.ssl.http.pemcert_filepath: elk-rest-crt.pem
# opendistro_security.ssl.http.pemkey_filepath: elk-rest-key.pem
# opendistro_security.ssl.http.pemtrustedcas_filepath: elk-rest-root-ca.pem
# # # minimum_master_nodes needs to be set explicitly when binding to a public IP
# # # set to 1 to allow single node clusters
# # # Details: https://github.com/elastic/elasticsearch/pull/17288
# discovery.zen.minimum_master_nodes: 1
# # Breaking change in 7.0
# # https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html#breaking_70_discovery_changes
# cluster.initial_master_nodes:
# - elasticsearch1
# - docker-test-node-1
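# # With master.replicas: 1 in this values file, a hedged example using the pod
# # name from the commented node block above (only needed if the chart does not
# # already wire up discovery itself):
# cluster.initial_master_nodes:
#   - opendistro-es-master-0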
log4jConfig: ""
loggingConfig:
## Default config
## you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console
logger:
## log action execution errors for easier debugging
action: DEBUG
## reduce the logging for aws; too much is logged under the default INFO
com.amazonaws: WARN
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
transportKeyPassphrase:
enabled: false
passPhrase:
sslKeyPassphrase:
enabled: false
passPhrase:
maxMapCount: 262144
image: amazon/opendistro-for-elasticsearch
imageTag: 1.10.1
## Specifies the image pull policy. Can be "Always" or "IfNotPresent" or "Never".
## Defaults to "Always".
imagePullPolicy: ""
configDirectory: /usr/share/elasticsearch/config
serviceAccount:
## Specifies whether a ServiceAccount should be created
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
nameOverride: ""
fullnameOverride: ""