Skip to content

Instantly share code, notes, and snippets.

@andycmaj
Forked from mweibel/README.md
Created May 5, 2022 17:40
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save andycmaj/7f7b50394c87b136762b46d8a91106e5 to your computer and use it in GitHub Desktop.
cortex configuration

README

Do not use initial-values.yaml. It's an example for our blog post and does not show values safe to use in production.

image:
tag: "v1.11.0"
tags:
# block storage memcached caching
blocks-storage-memcached: true
ingress:
enabled: true
ingressClass:
enabled: true
name: "nginx"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: domain.example.org
paths:
- /
tls:
- hosts:
- domain.example.org
secretName: somesecret
useConfigMap: true
config:
auth_enabled: true
limits:
reject_old_samples_max_age: 12h
max_series_per_metric: 200000
ingestion_rate: 50000
storage:
engine: blocks
blocks_storage:
backend: gcs
gcs:
bucket_name: cortex-bucket
service_account: |
${GCS_SERVICE_ACCOUNT}
tsdb:
retention_period: 24h
bucket_store:
index_cache:
memcached:
timeout: 300ms
max_idle_connections: 750
max_async_concurrency: 100
max_async_buffer_size: 10000000
max_get_multi_concurrency: 750
max_get_multi_batch_size: 1000
max_item_size: 16777216
chunks_cache:
memcached:
timeout: 300ms
max_idle_connections: 750
max_async_concurrency: 100
max_async_buffer_size: 10000000
max_get_multi_concurrency: 750
max_get_multi_batch_size: 1000
max_item_size: 33554432
metadata_cache:
memcached:
timeout: 300ms
max_idle_connections: 750
max_async_concurrency: 100
max_async_buffer_size: 10000000
max_get_multi_concurrency: 750
max_get_multi_batch_size: 1000
max_item_size: 16777216
sync_dir: /data/tsdb-sync
bucket_index:
enabled: true
# -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config
store_gateway:
sharding_enabled: false
ingester_client:
grpc_client_config:
grpc_compression: snappy
distributor:
ha_tracker:
enable_ha_tracker: true
kvstore:
store: consul
consul:
host: consul-consul-server.consul.svc:8500
shard_by_all_labels: true
pool:
health_check_ingesters: true
memberlist:
bind_port: 7946
# -- the service name of the memberlist
# if using memberlist discovery
join_members:
- '{{ include "cortex.fullname" $ }}-memberlist'
querier:
# https://cortexmetrics.io/docs/blocks-storage/production-tips/
query_ingesters_within: 12h5m
query_store_after: 12h
query_range:
split_queries_by_interval: 24h
align_queries_with_step: true
cache_results: true
results_cache:
cache:
memcached:
expiration: 1h
memcached_client:
timeout: 1s
ruler:
enable_alertmanager_discovery: false
# -- Enable the experimental ruler config api.
enable_api: true
ruler_storage:
backend: gcs
gcs:
bucket_name: cortex-bucket-alerts
service_account: |
${GCS_SERVICE_ACCOUNT}
runtime_config:
file: /etc/cortex-runtime-config/runtime_config.yaml
alertmanager:
# -- Enable the experimental alertmanager config api.
enable_api: true
alertmanager_storage:
backend: gcs
gcs:
bucket_name: cortex-bucket-alerts
service_account: |
${GCS_SERVICE_ACCOUNT}
frontend:
log_queries_longer_than: 10s
ingester:
lifecycler:
ring:
kvstore:
store: consul
consul:
host: consul-consul-server.consul.svc:8500
alertmanager:
enabled: true
replicas: 1
statefulSet:
enabled: true
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
persistentVolume:
enabled: true
size: 10Gi
storageClass: standard-rwo
containerSecurityContext:
enabled: true
readOnlyRootFilesystem: false # allow to load/validate alertmanager rules: https://github.com/cortexproject/cortex/issues/4089
distributor:
replicas: 2
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
ingester:
replicas: 3
statefulSet:
enabled: true
resources:
limits:
memory: 6Gi
requests:
memory: 6Gi
cpu: 500m
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
persistentVolume:
# https://cortexmetrics.io/docs/blocks-storage/production-tips/
size: 100Gi
storageClass: premium-rwo
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
targetMemoryUtilizationPercentage: 70
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
ruler:
enabled: true
replicas: 1
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
podDisruptionBudget:
maxUnavailable: 1
querier:
replicas: 2
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
query_frontend:
replicas: 2
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
configs:
enabled: false
nginx:
enabled: true
replicas: 2
config:
client_max_body_size: 5M
auth_orgs: [] # a list of tenants
basicAuthSecretName: "nginx-htpasswd"
setHeaders:
X-Scope-OrgID: $remote_user
requests:
cpu: 100m
memory: 256Mi
store_gateway:
replicas: 2
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- store-gateway
topologyKey: 'kubernetes.io/hostname'
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
persistentVolume:
enabled: true
size: 120Gi
subPath: ''
storageClass: premium-rwo
podDisruptionBudget:
maxUnavailable: 1
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
compactor:
enabled: true
replicas: 1
persistentVolume:
enabled: true
size: 200Gi
storageClass: premium-rwo
serviceMonitor:
enabled: true
# cortex-mixin expects job to be namespace/component
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
extraArgs:
"config.expand-env": true
env:
- name: GCS_SERVICE_ACCOUNT
valueFrom:
secretKeyRef:
name: gcs-service-account
key: key
memcached-frontend:
enabled: true
architecture: "high-availability"
replicaCount: 2
podDisruptionBudget:
create: true
resources:
limits:
memory: 1120Mi
cpu: 1
requests:
memory: 256Mi
cpu: 250m
extraEnv:
# -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
- name: MEMCACHED_CACHE_SIZE
value: "1024"
# -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
- name: MEMCACHED_MAX_CONNECTIONS
value: "1024"
# -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
# By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
# storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
- name: MEMCACHED_THREADS
value: "4"
metrics:
enabled: true
resources:
limits:
cpu: 200m
memory: 128Mi
serviceMonitor:
enabled: true
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
memcached-blocks-index:
architecture: "high-availability"
replicaCount: 2
podDisruptionBudget:
create: true
resources:
limits:
memory: 1120Mi
cpu: 1
requests:
memory: 256Mi
cpu: 250m
extraEnv:
# -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
- name: MEMCACHED_CACHE_SIZE
value: "1024"
# -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
- name: MEMCACHED_MAX_CONNECTIONS
value: "1024"
# -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
# By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
# storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
- name: MEMCACHED_THREADS
value: "4"
metrics:
enabled: true
resources:
limits:
cpu: 200m
memory: 128Mi
serviceMonitor:
enabled: true
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
memcached-blocks:
architecture: "high-availability"
replicaCount: 2
podDisruptionBudget:
create: true
resources:
limits:
memory: 1120Mi
cpu: 1
requests:
memory: 1120Mi
cpu: 250m
extraEnv:
# -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
- name: MEMCACHED_CACHE_SIZE
value: "1024"
# -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
- name: MEMCACHED_MAX_CONNECTIONS
value: "1024"
# -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
# By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
# storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
- name: MEMCACHED_THREADS
value: "4"
metrics:
enabled: true
resources:
limits:
cpu: 200m
memory: 128Mi
serviceMonitor:
enabled: true
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
memcached-blocks-metadata:
# enabled/disabled via the tags.blocks-storage-memcached boolean
architecture: "high-availability"
replicaCount: 2
podDisruptionBudget:
create: true
resources:
limits:
memory: 1120Mi
cpu: 1
requests:
memory: 256Mi
cpu: 250m
extraEnv:
# -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
- name: MEMCACHED_CACHE_SIZE
value: "1024"
# -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
- name: MEMCACHED_MAX_CONNECTIONS
value: "1024"
# -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
# By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
# storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
- name: MEMCACHED_THREADS
value: "4"
metrics:
enabled: true
resources:
limits:
cpu: 200m
memory: 128Mi
serviceMonitor:
enabled: true
relabelings:
- sourceLabels: [ __meta_kubernetes_service_label_cluster ]
regex: (.*)
replacement: $1
targetLabel: cluster
action: replace
- sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
targetLabel: job
separator: "/"
--- path/to/initial-values.yaml 2022-02-01 13:06:56.103508403 +0100
+++ path/to/current-values.yaml 2022-02-01 13:06:51.343597896 +0100
@@ -1,6 +1,10 @@
image:
tag: "v1.11.0"
+tags:
+ # block storage memcached caching
+ blocks-storage-memcached: true
+
ingress:
enabled: true
ingressClass:
@@ -17,10 +21,14 @@
- domain.example.org
secretName: somesecret
+useConfigMap: true
+
config:
auth_enabled: true
limits:
reject_old_samples_max_age: 12h
+ max_series_per_metric: 200000
+ ingestion_rate: 50000
storage:
engine: blocks
blocks_storage:
@@ -28,31 +36,46 @@
gcs:
bucket_name: cortex-bucket
service_account: |
- {
- "type": "service_account",
- ... SERVICE ACCOUNT CREDENTIALS
- }
+ ${GCS_SERVICE_ACCOUNT}
tsdb:
- dir: /data/tsdb
+ retention_period: 24h
bucket_store:
index_cache:
- backend: memcached
memcached:
- addresses: dns+memcached-blocks-index.cortex.svc:11211
+ timeout: 300ms
+ max_idle_connections: 750
+ max_async_concurrency: 100
+ max_async_buffer_size: 10000000
+ max_get_multi_concurrency: 750
+ max_get_multi_batch_size: 1000
+ max_item_size: 16777216
chunks_cache:
- backend: memcached
memcached:
- addresses: dns+memcached-blocks.cortex.svc:11211
+ timeout: 300ms
+ max_idle_connections: 750
+ max_async_concurrency: 100
+ max_async_buffer_size: 10000000
+ max_get_multi_concurrency: 750
+ max_get_multi_batch_size: 1000
+ max_item_size: 33554432
metadata_cache:
- backend: memcached
memcached:
- addresses: dns+memcached-blocks-metadata.cortex.svc:11211
+ timeout: 300ms
+ max_idle_connections: 750
+ max_async_concurrency: 100
+ max_async_buffer_size: 10000000
+ max_get_multi_concurrency: 750
+ max_get_multi_batch_size: 1000
+ max_item_size: 16777216
sync_dir: /data/tsdb-sync
bucket_index:
enabled: true
# -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config
store_gateway:
sharding_enabled: false
+ ingester_client:
+ grpc_client_config:
+ grpc_compression: snappy
distributor:
ha_tracker:
enable_ha_tracker: true
@@ -70,23 +93,9 @@
join_members:
- '{{ include "cortex.fullname" $ }}-memberlist'
querier:
- active_query_tracker_dir: /data/active-query-tracker
- # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all
- # queries are sent to ingester. Ingesters by default have no data older than 12 hours,
- # so we can safely set this 13 hours
- query_ingesters_within: 13h
- # -- The time after which a metric should be queried from storage and not just
- # ingesters.
+ # https://cortexmetrics.io/docs/blocks-storage/production-tips/
+ query_ingesters_within: 12h5m
query_store_after: 12h
- # -- Comma separated list of store-gateway addresses in DNS Service Discovery
- # format. This option should is set automatically when using the blocks storage and the
- # store-gateway sharding is disabled (when enabled, the store-gateway instances
- # form a ring and addresses are picked from the ring).
- # @default -- automatic
- store_gateway_addresses: |-
- {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}}
- dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095
- {{- end }}
query_range:
split_queries_by_interval: 24h
align_queries_with_step: true
@@ -97,7 +106,6 @@
expiration: 1h
memcached_client:
timeout: 1s
- addresses: dns+memcached-frontend.cortex.svc:11211
ruler:
enable_alertmanager_discovery: false
# -- Enable the experimental ruler config api.
@@ -105,27 +113,20 @@
ruler_storage:
backend: gcs
gcs:
- bucket_name: cortex-bucket
+ bucket_name: cortex-bucket-alerts
service_account: |
- {
- "type": "service_account",
- ... SERVICE ACCOUNT CREDENTIALS
- }
+ ${GCS_SERVICE_ACCOUNT}
runtime_config:
file: /etc/cortex-runtime-config/runtime_config.yaml
alertmanager:
# -- Enable the experimental alertmanager config api.
- enable_api: false
- external_url: '/api/prom/alertmanager'
+ enable_api: true
alertmanager_storage:
backend: gcs
gcs:
- bucket_name: cortex-bucket
+ bucket_name: cortex-bucket-alerts
service_account: |
- {
- "type": "service_account",
- ... SERVICE ACCOUNT CREDENTIALS
- }
+ ${GCS_SERVICE_ACCOUNT}
frontend:
log_queries_longer_than: 10s
ingester:
@@ -135,13 +136,6 @@
store: consul
consul:
host: consul-consul-server.consul.svc:8500
-runtimeconfigmap:
- # -- If true, a configmap for the `runtime_config` will be created.
- # If false, the configmap _must_ exist already on the cluster or pods will fail to create.
- create: true
- annotations: {}
- # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file
- runtime_config: {}
alertmanager:
enabled: true
@@ -150,415 +144,125 @@
statefulSet:
enabled: true
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
+
+ serviceMonitor:
+ enabled: true
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
persistentVolume:
enabled: true
size: 10Gi
storageClass: standard-rwo
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
containerSecurityContext:
enabled: true
- readOnlyRootFilesystem: true
-
- # -- Tolerations for pod assignment
- # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
- tolerations: []
-
- # -- If not set then a PodDisruptionBudget will not be created
- podDisruptionBudget:
- maxUnavailable: 1
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
- statefulStrategy:
- type: RollingUpdate
-
- terminationGracePeriodSeconds: 60
-
- # -- Init containers to be added to the cortex pod.
- initContainers: []
-
- # -- Additional containers to be added to the cortex pod.
- extraContainers: []
-
- # -- Additional volumes to the cortex pod.
- extraVolumes: []
-
- # -- Extra volume mounts that will be added to the cortex container
- extraVolumeMounts: []
-
- # -- Additional ports to the cortex services. Useful to expose extra container ports.
- extraPorts: []
-
- # -- Extra env variables to pass to the cortex container
- env: []
-
- # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
- sidecar:
- image:
- repository: quay.io/kiwigrid/k8s-sidecar
- tag: 1.10.7
- sha: ""
- imagePullPolicy: IfNotPresent
- resources: {}
- # -- skipTlsVerify Set to true to skip tls verification for kube api calls
- skipTlsVerify: false
- enableUniqueFilenames: false
- enabled: false
- label: cortex_alertmanager
- watchMethod: null
- labelValue: null
- folder: /data
- defaultFolderName: null
- searchNamespace: null
- folderAnnotation: null
- containerSecurityContext:
- enabled: true
- readOnlyRootFilesystem: true
+ readOnlyRootFilesystem: false # allow to load/validate alertmanager rules: https://github.com/cortexproject/cortex/issues/4089
distributor:
replicas: 2
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
-
serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/component
- operator: In
- values:
- - distributor
- topologyKey: 'kubernetes.io/hostname'
-
- annotations: {}
-
- autoscaling:
- # -- Creates a HorizontalPodAutoscaler for the distributor pods.
- enabled: false
- minReplicas: 2
- maxReplicas: 30
- targetCPUUtilizationPercentage: 80
- targetMemoryUtilizationPercentage: 0 # 80
- # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
- behavior: {}
-
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
-
- terminationGracePeriodSeconds: 60
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
- lifecycle: {}
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
ingester:
replicas: 3
statefulSet:
- # -- If true, use a statefulset instead of a deployment for pod management.
- # This is useful when using WAL
- enabled: false
- # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details
- podManagementPolicy: OrderedReady
-
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- name:
+ enabled: true
- serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
+ resources:
+ limits:
+ memory: 6Gi
+ requests:
+ memory: 6Gi
+ cpu: 500m
+
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
- nodeSelector: {}
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/component
- operator: In
- values:
- - ingester
- topologyKey: 'kubernetes.io/hostname'
-
- annotations: {}
+ persistentVolume:
+ # https://cortexmetrics.io/docs/blocks-storage/production-tips/
+ size: 100Gi
+ storageClass: premium-rwo
autoscaling:
- enabled: false
- minReplicas: 3
- maxReplicas: 30
- targetMemoryUtilizationPercentage: 80
- behavior:
- scaleDown:
- # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details
- policies:
- - type: Pods
- value: 1
- # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval
- periodSeconds: 1800
- # -- uses metrics from the past 1h to make scaleDown decisions
- stabilizationWindowSeconds: 3600
- scaleUp:
- # -- This default scaleup policy allows adding 1 pod every 30 minutes.
- # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
- policies:
- - type: Pods
- value: 1
- periodSeconds: 1800
-
- lifecycle:
- # -- The /shutdown preStop hook is recommended as part of the ingester
- # scaledown process, but can be removed to optimize rolling restarts in
- # instances that will never be scaled down or when using chunks storage
- # with WAL disabled.
- # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down
- preStop:
- httpGet:
- path: "/ingester/shutdown"
- port: http-metrics
-
- persistentVolume:
- # -- If true and ingester.statefulSet.enabled is true,
- # Ingester will create/use a Persistent Volume Claim
- # If false, use emptyDir
enabled: true
+ minReplicas: 3
+ maxReplicas: 10
+ targetMemoryUtilizationPercentage: 70
- # -- Ingester data Persistent Volume Claim annotations
- annotations: {}
-
- # -- Ingester data Persistent Volume access modes
- # Must match those of existing PV or dynamic provisioner
- # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
- accessModes:
- - ReadWriteOnce
-
- # -- Ingester data Persistent Volume size
- size: 2Gi
-
- # -- Subdirectory of Ingester data Persistent Volume to mount
- # Useful if the volume's root directory is not empty
- subPath: ''
-
- # -- Ingester data Persistent Volume Storage Class
- # If defined, storageClassName: <storageClass>
- # If set to "-", storageClassName: "", which disables dynamic provisioning
- # If undefined (the default) or set to null, no storageClassName spec is
- # set, choosing the default provisioner.
- storageClass: null
-
- startupProbe:
- failureThreshold: 60
- initialDelaySeconds: 120
- periodSeconds: 30
- httpGet:
- path: /ready
- port: http-metrics
- scheme: HTTP
- # -- Liveness probes for ingesters are not recommended.
- # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters
- livenessProbe: {}
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
+ serviceMonitor:
enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
- statefulStrategy:
- type: RollingUpdate
-
- terminationGracePeriodSeconds: 240
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
ruler:
enabled: true
replicas: 1
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity: {}
- annotations: {}
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
enabled: true
- readOnlyRootFilesystem: true
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
strategy:
type: RollingUpdate
@@ -566,408 +270,52 @@
maxSurge: 0
maxUnavailable: 1
- terminationGracePeriodSeconds: 180
-
- tolerations: []
-
podDisruptionBudget:
maxUnavailable: 1
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
- # -- allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html
- directories: {}
-
- # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
- sidecar:
- image:
- repository: quay.io/kiwigrid/k8s-sidecar
- tag: 1.10.7
- sha: ""
- imagePullPolicy: IfNotPresent
- resources: {}
- # limits:
- # cpu: 100m
- # memory: 100Mi
- # requests:
- # cpu: 50m
- # memory: 50Mi
- # skipTlsVerify Set to true to skip tls verification for kube api calls
- # skipTlsVerify: true
- enableUniqueFilenames: false
- enabled: false
- # -- label that the configmaps with rules are marked with
- label: cortex_rules
- watchMethod: null
- # -- value of label that the configmaps with rules are set to
- labelValue: null
- # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set)
- folder: /tmp/rules
- # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead
- defaultFolderName: null
- # -- If specified, the sidecar will search for rules config-maps inside this namespace.
- # Otherwise the namespace in which the sidecar is running will be used.
- # It's also possible to specify ALL to search in all namespaces
- searchNamespace: null
- # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here.
- # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure.
- folderAnnotation: null
- containerSecurityContext:
- enabled: true
- readOnlyRootFilesystem: true
-
querier:
replicas: 2
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/component
- operator: In
- values:
- - querier
- topologyKey: 'kubernetes.io/hostname'
-
- annotations: {}
-
- autoscaling:
- # -- Creates a HorizontalPodAutoscaler for the querier pods.
- enabled: false
- minReplicas: 2
- maxReplicas: 30
- targetCPUUtilizationPercentage: 80
- targetMemoryUtilizationPercentage: 0 # 80
- # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
- behavior: {}
-
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
-
- terminationGracePeriodSeconds: 180
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
- lifecycle: {}
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
query_frontend:
replicas: 2
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
-
serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/component
- operator: In
- values:
- - query-frontend
- topologyKey: 'kubernetes.io/hostname'
-
- annotations: {}
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
- containerSecurityContext:
enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
-
- terminationGracePeriodSeconds: 180
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
- lifecycle: {}
-
-table_manager:
- replicas: 1
-
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
-
- serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity: {}
- annotations: {}
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
- enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
-
- terminationGracePeriodSeconds: 180
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
configs:
enabled: false
- replicas: 1
-
- service:
- annotations: {}
- labels: {}
-
- serviceAccount:
- # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
- name: ""
-
- serviceMonitor:
- enabled: false
- additionalLabels: {}
- relabelings: []
- metricRelabelings: []
- # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
- extraEndpointSpec: {}
-
- resources: {}
-
- # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
- extraArgs: {}
-
- # -- Pod Labels
- podLabels: {}
-
- # -- Pod Annotations
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/port: 'http-metrics'
-
- nodeSelector: {}
- affinity: {}
- annotations: {}
- persistentVolume:
- subPath:
-
- startupProbe:
- httpGet:
- path: /ready
- port: http-metrics
- failureThreshold: 10
- livenessProbe:
- httpGet:
- path: /ready
- port: http-metrics
- readinessProbe:
- httpGet:
- path: /ready
- port: http-metrics
-
- securityContext: {}
-
- containerSecurityContext:
- enabled: true
- readOnlyRootFilesystem: true
-
- strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
-
- terminationGracePeriodSeconds: 180
-
- tolerations: []
-
- podDisruptionBudget:
- maxUnavailable: 1
-
- initContainers: []
- extraContainers: []
- extraVolumes: []
- extraVolumeMounts: []
- extraPorts: []
- env: []
nginx:
enabled: true
@@ -977,13 +325,13 @@
auth_orgs: [] # a list of tenants
basicAuthSecretName: "nginx-htpasswd"
setHeaders:
- X-Scope-Org-Id: $remote_user
+ X-Scope-OrgID: $remote_user
requests:
cpu: 100m
memory: 256Mi
store_gateway:
- replicas: 1
+ replicas: 2
affinity:
podAntiAffinity:
@@ -998,6 +346,14 @@
- store-gateway
topologyKey: 'kubernetes.io/hostname'
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
persistentVolume:
enabled: true
@@ -1008,41 +364,217 @@
podDisruptionBudget:
maxUnavailable: 1
+ serviceMonitor:
+ enabled: true
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
compactor:
enabled: true
replicas: 1
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app.kubernetes.io/component
- operator: In
- values:
- - compactor
- topologyKey: 'kubernetes.io/hostname'
-
persistentVolume:
enabled: true
size: 200Gi
storageClass: premium-rwo
- strategy:
- type: RollingUpdate
-
- terminationGracePeriodSeconds: 240
+ serviceMonitor:
+ enabled: true
+ # cortex-mixin expects job to be namespace/component
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
+ extraArgs:
+ "config.expand-env": true
+ env:
+ - name: GCS_SERVICE_ACCOUNT
+ valueFrom:
+ secretKeyRef:
+ name: gcs-service-account
+ key: key
+memcached-frontend:
+ enabled: true
+ architecture: "high-availability"
+ replicaCount: 2
podDisruptionBudget:
- maxUnavailable: 1
+ create: true
+ resources:
+ limits:
+ memory: 1120Mi
+ cpu: 1
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ extraEnv:
+ # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
+ - name: MEMCACHED_CACHE_SIZE
+ value: "1024"
+ # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "1024"
+ # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
+ # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
+ # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
+ - name: MEMCACHED_THREADS
+ value: "4"
+ metrics:
+ enabled: true
+ resources:
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ serviceMonitor:
+ enabled: true
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
+memcached-blocks-index:
+ architecture: "high-availability"
+ replicaCount: 2
+ podDisruptionBudget:
+ create: true
+ resources:
+ limits:
+ memory: 1120Mi
+ cpu: 1
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ extraEnv:
+ # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
+ - name: MEMCACHED_CACHE_SIZE
+ value: "1024"
+ # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "1024"
+ # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
+ # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
+ # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
+ - name: MEMCACHED_THREADS
+ value: "4"
+ metrics:
+ enabled: true
+ resources:
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ serviceMonitor:
+ enabled: true
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
+memcached-blocks:
+ architecture: "high-availability"
+ replicaCount: 2
+ podDisruptionBudget:
+ create: true
+ resources:
+ limits:
+ memory: 1120Mi
+ cpu: 1
+ requests:
+ memory: 1120Mi
+ cpu: 250m
+ extraEnv:
+ # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
+ - name: MEMCACHED_CACHE_SIZE
+ value: "1024"
+ # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "1024"
+ # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
+ # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
+ # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
+ - name: MEMCACHED_THREADS
+ value: "4"
+ metrics:
+ enabled: true
+ resources:
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ serviceMonitor:
+ enabled: true
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
+
+memcached-blocks-metadata:
+ # enabled/disabled via the tags.blocks-storage-memcached boolean
+ architecture: "high-availability"
+ replicaCount: 2
+ podDisruptionBudget:
+ create: true
+ resources:
+ limits:
+ memory: 1120Mi
+ cpu: 1
+ requests:
+ memory: 256Mi
+ cpu: 250m
+ extraEnv:
+ # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage
+ - name: MEMCACHED_CACHE_SIZE
+ value: "1024"
+ # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "1024"
+ # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests.
+ # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of
+ # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values.
+ - name: MEMCACHED_THREADS
+ value: "4"
+ metrics:
+ enabled: true
+ resources:
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ serviceMonitor:
+ enabled: true
+ relabelings:
+ - sourceLabels: [ __meta_kubernetes_service_label_cluster ]
+ regex: (.*)
+ replacement: $1
+ targetLabel: cluster
+ action: replace
+ - sourceLabels: [ __meta_kubernetes_namespace, __meta_kubernetes_service_label_app_kubernetes_io_component ]
+ targetLabel: job
+ separator: "/"
-configsdb_postgresql:
- enabled: false
- uri:
- auth:
- password:
- existing_secret:
- name:
- key:
image:
  # -- Cortex image tag; quoted so it stays a string.
  tag: "v1.11.0"
ingress:
  enabled: true
  ingressClass:
    enabled: true
    name: "nginx"
  annotations:
    # cert-manager issues the TLS certificate for the host below.
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    # Example hostname — replace with the real external DNS name.
    - host: domain.example.org
      paths:
        - /
  tls:
    - hosts:
        - domain.example.org
      # Secret holding the TLS cert/key (managed by cert-manager per the annotation above).
      secretName: somesecret
# Cortex runtime configuration rendered into the Cortex config file by the chart.
config:
  # Multi-tenant mode: requests must carry an org/tenant header.
  auth_enabled: true
  limits:
    reject_old_samples_max_age: 12h
  storage:
    engine: blocks
  blocks_storage:
    backend: gcs
    gcs:
      bucket_name: cortex-bucket
      # WARNING: inline service-account JSON placeholder. Do not commit real
      # credentials to VCS; inject them via a secret instead.
      service_account: |
        {
          "type": "service_account",
          ... SERVICE ACCOUNT CREDENTIALS
        }
    tsdb:
      dir: /data/tsdb
    bucket_store:
      # All three caches point at the dedicated memcached services;
      # the dns+ prefix enables DNS service discovery of instances.
      index_cache:
        backend: memcached
        memcached:
          addresses: dns+memcached-blocks-index.cortex.svc:11211
      chunks_cache:
        backend: memcached
        memcached:
          addresses: dns+memcached-blocks.cortex.svc:11211
      metadata_cache:
        backend: memcached
        memcached:
          addresses: dns+memcached-blocks-metadata.cortex.svc:11211
      sync_dir: /data/tsdb-sync
      bucket_index:
        enabled: true
  # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config
  store_gateway:
    sharding_enabled: false
  distributor:
    ha_tracker:
      enable_ha_tracker: true
      kvstore:
        store: consul
        consul:
          host: consul-consul-server.consul.svc:8500
    shard_by_all_labels: true
    pool:
      health_check_ingesters: true
  # NOTE(review): the kvstores configured above use consul; these memberlist
  # settings only take effect if a kvstore is switched to memberlist — confirm.
  memberlist:
    bind_port: 7946
    # -- the service name of the memberlist
    # if using memberlist discovery
    join_members:
      - '{{ include "cortex.fullname" $ }}-memberlist'
  querier:
    active_query_tracker_dir: /data/active-query-tracker
    # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all
    # queries are sent to ingester. Ingesters by default have no data older than 12 hours,
    # so we can safely set this 13 hours
    query_ingesters_within: 13h
    # -- The time after which a metric should be queried from storage and not just
    # ingesters.
    query_store_after: 12h
    # -- Comma separated list of store-gateway addresses in DNS Service Discovery
    # format. This option is set automatically when using the blocks storage and the
    # store-gateway sharding is disabled (when enabled, the store-gateway instances
    # form a ring and addresses are picked from the ring).
    # @default -- automatic
    store_gateway_addresses: |-
      {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}}
      dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095
      {{- end }}
  query_range:
    split_queries_by_interval: 24h
    align_queries_with_step: true
    cache_results: true
    results_cache:
      cache:
        memcached:
          expiration: 1h
        memcached_client:
          timeout: 1s
          addresses: dns+memcached-frontend.cortex.svc:11211
  ruler:
    enable_alertmanager_discovery: false
    # -- Enable the experimental ruler config api.
    enable_api: true
  ruler_storage:
    backend: gcs
    gcs:
      bucket_name: cortex-bucket
      # WARNING: placeholder credentials — see note on blocks_storage above.
      service_account: |
        {
          "type": "service_account",
          ... SERVICE ACCOUNT CREDENTIALS
        }
  runtime_config:
    file: /etc/cortex-runtime-config/runtime_config.yaml
  alertmanager:
    # -- Enable the experimental alertmanager config api.
    enable_api: false
    external_url: '/api/prom/alertmanager'
  alertmanager_storage:
    backend: gcs
    gcs:
      bucket_name: cortex-bucket
      # WARNING: placeholder credentials — see note on blocks_storage above.
      service_account: |
        {
          "type": "service_account",
          ... SERVICE ACCOUNT CREDENTIALS
        }
  frontend:
    # Log any query slower than 10s for troubleshooting.
    log_queries_longer_than: 10s
  ingester:
    lifecycler:
      ring:
        kvstore:
          store: consul
          consul:
            host: consul-consul-server.consul.svc:8500
# ConfigMap backing the runtime configuration file referenced by
# config.runtime_config.file above.
runtimeconfigmap:
  # -- If true, a configmap for the `runtime_config` will be created.
  # If false, the configmap _must_ exist already on the cluster or pods will fail to create.
  create: true
  annotations: {}
  # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file
  runtime_config: {}
# alertmanager component workload settings.
alertmanager:
  enabled: true
  replicas: 1
  statefulSet:
    # Run as a StatefulSet so the persistent volume below can be used.
    enabled: true
  serviceAccount:
    # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
    name: ""
  persistentVolume:
    enabled: true
    size: 10Gi
    storageClass: standard-rwo
  # All probes hit the component's /ready endpoint on the metrics port.
  startupProbe:
    httpGet:
      path: /ready
      port: http-metrics
    failureThreshold: 10
  livenessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  securityContext: {}
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: true
  # -- Tolerations for pod assignment
  # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # -- If not set then a PodDisruptionBudget will not be created
  podDisruptionBudget:
    maxUnavailable: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  statefulStrategy:
    type: RollingUpdate
  terminationGracePeriodSeconds: 60
  # -- Init containers to be added to the cortex pod.
  initContainers: []
  # -- Additional containers to be added to the cortex pod.
  extraContainers: []
  # -- Additional volumes to the cortex pod.
  extraVolumes: []
  # -- Extra volume mounts that will be added to the cortex container
  extraVolumeMounts: []
  # -- Additional ports to the cortex services. Useful to expose extra container ports.
  extraPorts: []
  # -- Extra env variables to pass to the cortex container
  env: []
  # -- Sidecars that collect the configmaps with specified label and store the included files into the respective folders
  sidecar:
    image:
      repository: quay.io/kiwigrid/k8s-sidecar
      tag: 1.10.7
      sha: ""
    imagePullPolicy: IfNotPresent
    resources: {}
    # -- skipTlsVerify Set to true to skip tls verification for kube api calls
    skipTlsVerify: false
    enableUniqueFilenames: false
    # Sidecar is off by default; the settings below only apply when enabled.
    enabled: false
    label: cortex_alertmanager
    watchMethod: null
    labelValue: null
    folder: /data
    defaultFolderName: null
    searchNamespace: null
    folderAnnotation: null
    containerSecurityContext:
      enabled: true
      readOnlyRootFilesystem: true
# distributor component workload settings.
distributor:
  replicas: 2
  service:
    annotations: {}
    labels: {}
  serviceAccount:
    # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
    name: ""
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    relabelings: []
    metricRelabelings: []
    # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    extraEndpointSpec: {}
  resources: {}
  # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
  extraArgs: {}
  # -- Pod Labels
  podLabels: {}
  # -- Pod Annotations
  podAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: 'http-metrics'
  nodeSelector: {}
  # Soft anti-affinity: prefer spreading distributor pods across nodes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                    - distributor
            topologyKey: 'kubernetes.io/hostname'
  annotations: {}
  autoscaling:
    # -- Creates a HorizontalPodAutoscaler for the distributor pods.
    enabled: false
    minReplicas: 2
    maxReplicas: 30
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 0 # 80
    # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
    behavior: {}
  persistentVolume:
    # Empty value parses as null — no subPath is set by default.
    subPath:
  startupProbe:
    httpGet:
      path: /ready
      port: http-metrics
    failureThreshold: 10
  livenessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  securityContext: {}
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: true
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  terminationGracePeriodSeconds: 60
  tolerations: []
  podDisruptionBudget:
    maxUnavailable: 1
  initContainers: []
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  extraPorts: []
  env: []
  lifecycle: {}
# ingester component workload settings.
ingester:
  replicas: 3
  statefulSet:
    # -- If true, use a statefulset instead of a deployment for pod management.
    # This is useful when using WAL
    enabled: false
    # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details
    podManagementPolicy: OrderedReady
  service:
    annotations: {}
    labels: {}
  serviceAccount:
    # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
    # (normalized from a bare empty value for consistency with the other components).
    name: ""
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    relabelings: []
    metricRelabelings: []
    # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    extraEndpointSpec: {}
  resources: {}
  # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
  extraArgs: {}
  # -- Pod Labels
  podLabels: {}
  # -- Pod Annotations
  podAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: 'http-metrics'
  nodeSelector: {}
  # Soft anti-affinity: prefer spreading ingester pods across nodes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                    - ingester
            topologyKey: 'kubernetes.io/hostname'
  annotations: {}
  autoscaling:
    enabled: false
    minReplicas: 3
    maxReplicas: 30
    targetMemoryUtilizationPercentage: 80
    behavior:
      scaleDown:
        # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details
        policies:
          - type: Pods
            value: 1
            # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval
            periodSeconds: 1800
        # -- uses metrics from the past 1h to make scaleDown decisions
        stabilizationWindowSeconds: 3600
      scaleUp:
        # -- This default scaleup policy allows adding 1 pod every 30 minutes.
        # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
        policies:
          - type: Pods
            value: 1
            periodSeconds: 1800
  lifecycle:
    # -- The /shutdown preStop hook is recommended as part of the ingester
    # scaledown process, but can be removed to optimize rolling restarts in
    # instances that will never be scaled down or when using chunks storage
    # with WAL disabled.
    # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down
    preStop:
      httpGet:
        path: "/ingester/shutdown"
        port: http-metrics
  persistentVolume:
    # -- If true and ingester.statefulSet.enabled is true,
    # Ingester will create/use a Persistent Volume Claim
    # If false, use emptyDir
    enabled: true
    # -- Ingester data Persistent Volume Claim annotations
    annotations: {}
    # -- Ingester data Persistent Volume access modes
    # Must match those of existing PV or dynamic provisioner
    # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
    accessModes:
      - ReadWriteOnce
    # -- Ingester data Persistent Volume size
    size: 2Gi
    # -- Subdirectory of Ingester data Persistent Volume to mount
    # Useful if the volume's root directory is not empty
    subPath: ''
    # -- Ingester data Persistent Volume Storage Class
    # If defined, storageClassName: <storageClass>
    # If set to "-", storageClassName: "", which disables dynamic provisioning
    # If undefined (the default) or set to null, no storageClassName spec is
    # set, choosing the default provisioner.
    storageClass: null
  # Generous startup probe: tolerate long replay/registration before Ready.
  startupProbe:
    failureThreshold: 60
    initialDelaySeconds: 120
    periodSeconds: 30
    httpGet:
      path: /ready
      port: http-metrics
      scheme: HTTP
  # -- Liveness probes for ingesters are not recommended.
  # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters
  livenessProbe: {}
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  securityContext: {}
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: true
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  statefulStrategy:
    type: RollingUpdate
  terminationGracePeriodSeconds: 240
  tolerations: []
  podDisruptionBudget:
    maxUnavailable: 1
  initContainers: []
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  extraPorts: []
  env: []
# ruler component workload settings (storage configured via config.ruler_storage).
ruler:
  enabled: true
  replicas: 1
  service:
    annotations: {}
    labels: {}
  serviceAccount:
    # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
    name: ""
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    relabelings: []
    metricRelabelings: []
    # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    extraEndpointSpec: {}
  resources: {}
  # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
  extraArgs: {}
  # -- Pod Labels
  podLabels: {}
  # -- Pod Annotations
  podAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: 'http-metrics'
  nodeSelector: {}
  affinity: {}
  annotations: {}
  persistentVolume:
    # Empty value parses as null — no subPath is set by default.
    subPath:
  startupProbe:
    httpGet:
      path: /ready
      port: http-metrics
    failureThreshold: 10
  livenessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  securityContext: {}
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: true
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  terminationGracePeriodSeconds: 180
  tolerations: []
  podDisruptionBudget:
    maxUnavailable: 1
  initContainers: []
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  extraPorts: []
  env: []
  # -- allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html
  directories: {}
  # -- Sidecars that collect the configmaps with specified label and store the included files into the respective folders
  sidecar:
    image:
      repository: quay.io/kiwigrid/k8s-sidecar
      tag: 1.10.7
      sha: ""
    imagePullPolicy: IfNotPresent
    resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 100Mi
    # requests:
    #   cpu: 50m
    #   memory: 50Mi
    # skipTlsVerify Set to true to skip tls verification for kube api calls
    # skipTlsVerify: true
    enableUniqueFilenames: false
    # Sidecar is off by default; the settings below only apply when enabled.
    enabled: false
    # -- label that the configmaps with rules are marked with
    label: cortex_rules
    watchMethod: null
    # -- value of label that the configmaps with rules are set to
    labelValue: null
    # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set)
    folder: /tmp/rules
    # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead
    defaultFolderName: null
    # -- If specified, the sidecar will search for rules config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
    # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here.
    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
    folderAnnotation: null
    containerSecurityContext:
      enabled: true
      readOnlyRootFilesystem: true
# querier component workload settings (query-path tuning lives under config.querier).
querier:
  replicas: 2
  service:
    annotations: {}
    labels: {}
  serviceAccount:
    # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
    name: ""
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    relabelings: []
    metricRelabelings: []
    # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    extraEndpointSpec: {}
  resources: {}
  # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
  extraArgs: {}
  # -- Pod Labels
  podLabels: {}
  # -- Pod Annotations
  podAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: 'http-metrics'
  nodeSelector: {}
  # Soft anti-affinity: prefer spreading querier pods across nodes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                    - querier
            topologyKey: 'kubernetes.io/hostname'
  annotations: {}
  autoscaling:
    # -- Creates a HorizontalPodAutoscaler for the querier pods.
    enabled: false
    minReplicas: 2
    maxReplicas: 30
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 0 # 80
    # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
    behavior: {}
  persistentVolume:
    # Empty value parses as null — no subPath is set by default.
    subPath:
  startupProbe:
    httpGet:
      path: /ready
      port: http-metrics
    failureThreshold: 10
  livenessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
  securityContext: {}
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: true
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  terminationGracePeriodSeconds: 180
  tolerations: []
  podDisruptionBudget:
    maxUnavailable: 1
  initContainers: []
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  extraPorts: []
  env: []
  lifecycle: {}
query_frontend:
replicas: 2
service:
annotations: {}
labels: {}
serviceAccount:
# -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
name: ""
serviceMonitor:
enabled: false
additionalLabels: {}
relabelings: []
metricRelabelings: []
# -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
extraEndpointSpec: {}
resources: {}
# -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
extraArgs: {}
# -- Pod Labels
podLabels: {}
# -- Pod Annotations
podAnnotations:
prometheus.io/scrape: 'true'
prometheus.io/port: 'http-metrics'
nodeSelector: {}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- query-frontend
topologyKey: 'kubernetes.io/hostname'
annotations: {}
persistentVolume:
subPath:
startupProbe:
httpGet:
path: /ready
port: http-metrics
failureThreshold: 10
livenessProbe:
httpGet:
path: /ready
port: http-metrics
readinessProbe:
httpGet:
path: /ready
port: http-metrics
securityContext: {}
containerSecurityContext:
enabled: true
readOnlyRootFilesystem: true
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
terminationGracePeriodSeconds: 180
tolerations: []
podDisruptionBudget:
maxUnavailable: 1
initContainers: []
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
extraPorts: []
env: []
lifecycle: {}
table_manager:
replicas: 1
service:
annotations: {}
labels: {}
serviceAccount:
# -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
name: ""
serviceMonitor:
enabled: false
additionalLabels: {}
relabelings: []
metricRelabelings: []
# -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
extraEndpointSpec: {}
resources: {}
# -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
extraArgs: {}
# -- Pod Labels
podLabels: {}
# -- Pod Annotations
podAnnotations:
prometheus.io/scrape: 'true'
prometheus.io/port: 'http-metrics'
nodeSelector: {}
affinity: {}
annotations: {}
persistentVolume:
subPath:
startupProbe:
httpGet:
path: /ready
port: http-metrics
failureThreshold: 10
livenessProbe:
httpGet:
path: /ready
port: http-metrics
readinessProbe:
httpGet:
path: /ready
port: http-metrics
securityContext: {}
containerSecurityContext:
enabled: true
readOnlyRootFilesystem: true
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
terminationGracePeriodSeconds: 180
tolerations: []
podDisruptionBudget:
maxUnavailable: 1
initContainers: []
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
extraPorts: []
env: []
configs:
enabled: false
replicas: 1
service:
annotations: {}
labels: {}
serviceAccount:
# -- "" disables the individual serviceAccount and uses the global serviceAccount for that component
name: ""
serviceMonitor:
enabled: false
additionalLabels: {}
relabelings: []
metricRelabelings: []
# -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
extraEndpointSpec: {}
resources: {}
# -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error)
extraArgs: {}
# -- Pod Labels
podLabels: {}
# -- Pod Annotations
podAnnotations:
prometheus.io/scrape: 'true'
prometheus.io/port: 'http-metrics'
nodeSelector: {}
affinity: {}
annotations: {}
persistentVolume:
subPath:
startupProbe:
httpGet:
path: /ready
port: http-metrics
failureThreshold: 10
livenessProbe:
httpGet:
path: /ready
port: http-metrics
readinessProbe:
httpGet:
path: /ready
port: http-metrics
securityContext: {}
containerSecurityContext:
enabled: true
readOnlyRootFilesystem: true
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
terminationGracePeriodSeconds: 180
tolerations: []
podDisruptionBudget:
maxUnavailable: 1
initContainers: []
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
extraPorts: []
env: []
nginx:
enabled: true
replicas: 2
config:
client_max_body_size: 5M
auth_orgs: [] # a list of tenants
basicAuthSecretName: "nginx-htpasswd"
setHeaders:
X-Scope-OrgID: $remote_user
requests:
cpu: 100m
memory: 256Mi
store_gateway:
replicas: 1
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- store-gateway
topologyKey: 'kubernetes.io/hostname'
persistentVolume:
enabled: true
size: 120Gi
subPath: ''
storageClass: premium-rwo
podDisruptionBudget:
maxUnavailable: 1
compactor:
enabled: true
replicas: 1
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- compactor
topologyKey: 'kubernetes.io/hostname'
persistentVolume:
enabled: true
size: 200Gi
storageClass: premium-rwo
strategy:
type: RollingUpdate
terminationGracePeriodSeconds: 240
podDisruptionBudget:
maxUnavailable: 1
configsdb_postgresql:
enabled: false
uri:
auth:
password:
existing_secret:
name:
key:
# Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment