Skip to content

Instantly share code, notes, and snippets.

@krainboltgreene
Last active February 21, 2024 09:27
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save krainboltgreene/6209955ed4a647e7b936863111c5d62b to your computer and use it in GitHub Desktop.
version: "3.7"

# Shared logging configuration: ship container stdout/stderr to Logstash
# over GELF/UDP. Logstash has a fixed IP on the "outside" network (see the
# top-level `networks:` key at the bottom of the compose file).
x-logging-elk: &logging-elk
  # driver: json-file
  driver: gelf
  options:
    gelf-address: udp://172.16.0.38:12201
    labels: container_group

# Environment shared by every Sentry container (web, cron, worker) via the
# `<<: *sentry-environment` merge key.
x-sentry-environment: &sentry-environment
  # Empty value: passes SENTRY_SECRET_KEY through from the host environment
  # instead of committing the secret here.
  SENTRY_SECRET_KEY:
  SENTRY_POSTGRES_HOST: "sentry-postgres"
  SENTRY_DB_NAME: "sentry_production"
  SENTRY_MEMCACHED_HOST: "sentry-memcached"
  SENTRY_REDIS_HOST: "sentry-redis"
  # Quoted deliberately: Sentry reads this as the string "True", not a YAML bool.
  SENTRY_SINGLE_ORGANIZATION: "True"
  SENTRY_EMAIL_HOST: "smtp-la.poutineer.com"
  SENTRY_SERVER_EMAIL: "sentry@poutineer.com"
services:
  # Screwdriver CD API server; spawns build containers via the host Docker socket.
  screwdrivercd-api:
    image: screwdrivercd/screwdriver:stable
    labels:
      container_group: screwdrivercd
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock:rw"
      - "./volumes/data/screwdrivercd:/tmp/sd-data/:rw"
    environment:
      # Quoted so the env var is delivered as a string, not a YAML integer.
      PORT: "8080"
      VIRTUAL_HOST: screwdrivercd-api.internal.poutineer.com
      URI: http://screwdrivercd-api.internal.poutineer.com
      ECOSYSTEM_UI: http://screwdrivercd.internal.poutineer.com
      ECOSYSTEM_STORE: http://screwdrivercd-store.internal.poutineer.com
      DATASTORE_PLUGIN: sequelize
      DATASTORE_SEQUELIZE_DIALECT: sqlite
      DATASTORE_SEQUELIZE_STORAGE: /tmp/sd-data/storage.db
      EXECUTOR_PLUGIN: docker
      EXECUTOR_QUEUE_ENABLED: "false"
      SECRET_WHITELIST: "[]"
      EXECUTOR_DOCKER_DOCKER: |
        {
          "socketPath": "/var/run/docker.sock"
        }
      # NOTE(review): the GitHub OAuth client secret and webhook signing secret
      # below are committed in plain text — rotate them and move them to an
      # env file or secret store.
      SCM_SETTINGS: |
        {
          "github": {
            "plugin": "github",
            "config": {
              "username": "sd-buildbot",
              "secret": "SUPER-SECRET-SIGNING-THING",
              "oauthClientSecret": "02d7eaf8700eb1c316298eec1f728ba16620083e",
              "email": "dev-null@screwdriver.cd",
              "oauthClientId": "45a965bbc6e4bedf7787",
              "gheHost": "github.poutineer.com"
            }
          }
        }
      # NOTE(review): RSA private key committed to VCS — rotate this keypair
      # and inject it at deploy time instead.
      SECRET_JWT_PRIVATE_KEY: |
        -----BEGIN RSA PRIVATE KEY-----
        MIICXQIBAAKBgQDCZwYtN/yNrHefEqLJutZBU5patmAY8ZtBGv8r0E9XAUMR/HL6
        YR+YssG3UUch/heVnOxjmoIPVfPgPquruYQQfMKjL3qGCUm3P36i54+zntvKSrx4
        OCxMDxECX5F8ioMDkL+W3eGWPULhtXf/VACcKErP7mF4KgdfCZz+Sz/puQIDAQAB
        AoGBALiiaLdhyTg1aZdnd7zo+r0ozh0Bwdtu/Omk3l2ahwA3bqASiJFM1VmNzXZy
        d4DNd1DdrZYcFRLDq9B7NUjv9YCXywABRKodaj1X8WZd+jKmuZ23Wm/8AIYAwJWz
        G/oQiD9m2jt4DiZEJ4Vx6hvb2egHRJx4rFnO3iq3w+3S/wIBAkEA/oe2ZzubOW4G
        aNLu9tncyUztPl7Jc6xzuHAump73kqlGDa5aV5AD5GgkL56BuiRPIKmOpghgBMvI
        PA7a6fd30QJBAMOGa9GWYBXXRTzOW0DgJkplCs/j2LEu8gOFim4ozdy2c8wJ+Mjw
        qdi9e7HogOIblwHBDn3tQwEnrScF/xe5tWkCQGu1WE3FHoGl+pZfNHpHZTPWFsr+
        53bIskuJRV9vB8sLULa3YkmNiugb3bw88h6oAMgMmG3QO3msFna7vo2liBECQEJv
        XhssuSEyr/flNeRpRRlqE65ngK1Tgi/oJA0iGDCyXc4qZToBa/SVDX55vbEWJs0V
        x9LJ10aBMLhdtTtegmECQQDDDqBn9z2bk0PzEDB4c7FOe8ZhOGZQurEEfYpKOZ7z
        zFURfUT5E1e8hajN+Wvz+HkJHdkTJxb4S10YR9qvWIy4
        -----END RSA PRIVATE KEY-----
      SECRET_JWT_PUBLIC_KEY: |
        -----BEGIN PUBLIC KEY-----
        MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDCZwYtN/yNrHefEqLJutZBU5pa
        tmAY8ZtBGv8r0E9XAUMR/HL6YR+YssG3UUch/heVnOxjmoIPVfPgPquruYQQfMKj
        L3qGCUm3P36i54+zntvKSrx4OCxMDxECX5F8ioMDkL+W3eGWPULhtXf/VACcKErP
        7mF4KgdfCZz+Sz/puQIDAQAB
        -----END PUBLIC KEY-----
    logging:
      <<: *logging-elk

  # Screwdriver CD web UI.
  screwdriver-ui:
    image: screwdrivercd/ui:stable
    labels:
      container_group: screwdrivercd
    environment:
      VIRTUAL_HOST: screwdrivercd.internal.poutineer.com
      ECOSYSTEM_API: http://screwdrivercd-api.internal.poutineer.com
      ECOSYSTEM_STORE: http://screwdrivercd-store.internal.poutineer.com
      AVATAR_HOSTNAME: avatars*.githubusercontent.com
    logging:
      <<: *logging-elk

  # Screwdriver CD artifact/log store; validates API-issued JWTs with the
  # public half of the keypair above.
  screwdriver-store:
    image: screwdrivercd/store:stable
    labels:
      container_group: screwdrivercd
    environment:
      VIRTUAL_HOST: screwdrivercd-store.internal.poutineer.com
      ECOSYSTEM_UI: http://screwdrivercd.internal.poutineer.com
      URI: http://screwdrivercd-store.internal.poutineer.com
      SECRET_JWT_PUBLIC_KEY: |
        -----BEGIN PUBLIC KEY-----
        MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDCZwYtN/yNrHefEqLJutZBU5pa
        tmAY8ZtBGv8r0E9XAUMR/HL6YR+YssG3UUch/heVnOxjmoIPVfPgPquruYQQfMKj
        L3qGCUm3P36i54+zntvKSrx4OCxMDxECX5F8ioMDkL+W3eGWPULhtXf/VACcKErP
        7mF4KgdfCZz+Sz/puQIDAQAB
        -----END PUBLIC KEY-----
    logging:
      <<: *logging-elk
sentry:
image: sentry:9.0.0
labels:
container_group: sentry
ports:
- 9000:9000
depends_on:
- logstash
- sentry-postgres
- sentry-redis
- sentry-memcached
environment:
<<: *sentry-environment
VIRTUAL_HOST: sentry.internal.poutineer.com
logging:
<<: *logging-elk
sentry-cron:
command: sentry run cron
labels:
container_group: sentry
image: sentry:9.0.0
depends_on:
- logstash
- sentry-postgres
- sentry-redis
- sentry-memcached
environment:
<<: *sentry-environment
logging:
<<: *logging-elk
sentry-worker:
command: sentry run worker
labels:
container_group: sentry
image: sentry:9.0.0
depends_on:
- logstash
- sentry-postgres
- sentry-redis
- sentry-memcached
environment:
<<: *sentry-environment
logging:
<<: *logging-elk
sentry-redis:
image: redis:4.0.11-alpine
labels:
container_group: sentry
volumes:
- "./volumes/data/redis/sentry-redis:/data:rw"
depends_on:
- logstash
healthcheck:
test: redis-cli ping
interval: 30s
timeout: 10s
retries: 3
logging:
<<: *logging-elk
sentry-postgres:
image: postgres:10.5-alpine
labels:
container_group: sentry
volumes:
- "./volumes/data/postgres/sentry-postgres:/var/lib/postgresql/data:rw"
- "./volumes/settings/postgres/sentry-postgres/pg_hba.conf:/var/lib/postgresql/data/pg_hba.conf"
- "./volumes/settings/postgres/sentry-postgres/pg_ident.conf:/var/lib/postgresql/data/pg_ident.conf"
- "./volumes/settings/postgres/sentry-postgres/postgresql.conf:/var/lib/postgresql/data/postgresql.conf"
depends_on:
- logstash
healthcheck:
test: pg_isready -U postgres
interval: 10s
timeout: 5s
retries: 5
logging:
<<: *logging-elk
sentry-memcached:
image: memcached:1.5.10-alpine
labels:
container_group: sentry
depends_on:
- logstash
healthcheck:
test: echo stats | nc 127.0.0.1 11211
interval: 10s
retries: 60
logging:
<<: *logging-elk
# Pretty frontend to explore and check out all your logs.
kibana:
image: docker.elastic.co/kibana/kibana-oss:6.4.2
restart: always
labels:
container_group: logging
depends_on:
- elasticsearch
- logstash
volumes:
- "./volumes/settings/kibana/:/usr/share/kibana/config/"
environment:
VIRTUAL_HOST: "kibana.internal.poutineer.com"
NODE_OPTIONS: "--max-old-space-size=200" # fixes memory leak (https://github.com/elastic/kibana/issues/5170)
logging:
<<: *logging-elk
# Storage and search backend. Gets all logs from Logstash and is the backend that Kibana runs on.
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.2
command: elasticsearch -Etransport.host=127.0.0.1 -Ebootstrap.memory_lock=false
restart: always
labels:
container_group: logging
volumes:
- "./volumes/settings/elasticsearch/:/usr/share/elasticsearch/config/"
- "./volumes/data/elasticsearch/:/usr/share/elasticsearch/data/"
environment:
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
discovery.type: "single-node"
bootstrap.memory_lock: "true"
ulimits:
memlock:
soft: -1
hard: -1
logging:
<<: *logging-elk
# Aggregates logs and forwards them to Elasticsearch.
logstash:
image: docker.elastic.co/logstash/logstash-oss:6.4.2
restart: always
ports:
- 5044:5044
- 8080:8080
- 9600:9600
- 12201:12201/udp
volumes:
- "./volumes/settings/logstash/config/:/usr/share/logstash/config/"
- "./volumes/settings/logstash/patterns/:/opt/logstash/extra_patterns"
- "./volumes/settings/logstash/pipeline/:/usr/share/logstash/pipeline/"
depends_on:
- elasticsearch
labels:
container_group: logging
networks:
default:
outside:
ipv4_address: 172.16.0.38
logging:
driver: json-file
# <<: *logging-elk
# Pretty frontend to explore and check out all your metrics.
grafana:
image: grafana/grafana:5.3.1
restart: always
labels:
container_group: monitoring
volumes:
- "./volumes/data/grafana/sessions/:/var/lib/grafana/sessions:rw"
- "./volumes/data/grafana/grafana.db:/var/lib/grafana/grafana.db:rw"
- "./volumes/settings/grafana/plugins:/var/lib/grafana/plugins"
- "./volumes/settings/grafana/config/:/etc/grafana/"
depends_on:
- logstash
environment:
GF_SERVER_ROOT_URL: "http://grafana.internal.poutineer.com"
GF_SECURITY_ADMIN_USER: "admin"
VIRTUAL_HOST: "grafana.internal.poutineer.com"
logging:
<<: *logging-elk
# Storage and search backend. Gets all metrics from cAdvisor and Nodeexporter and is the backend that Grafana runs on.
prometheus:
image: prom/prometheus:v2.4.3
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/etc/prometheus/console_libraries"
- "--web.console.templates=/etc/prometheus/consoles"
- "--storage.tsdb.retention=42h"
- "--web.enable-lifecycle"
restart: always
labels:
container_group: monitoring
depends_on:
- logstash
volumes:
- "./volumes/data/prometheus:/prometheus:rw"
- "./volumes/settings/prometheus:/etc/prometheus:rw"
environment:
VIRTUAL_HOST: "prometheus.internal.poutineer.com"
VIRTUAL_PORT: 9090
logging:
<<: *logging-elk
# Alerting plugin wrapping Prometheus and taking care of your metric alerting needs.
alertmanager:
image: prom/alertmanager:v0.15.2
command:
- "--config.file=/etc/alertmanager/config.yml"
- "--storage.path=/alertmanager"
restart: always
labels:
container_group: monitoring
volumes:
- "./volumes/settings/alertmanager/:/etc/alertmanager/:rw"
- "./volumes/data/alertmanager/:/alertmanager"
depends_on:
- logstash
environment:
VIRTUAL_HOST: "alertmanager.internal.poutineer.com"
logging:
<<: *logging-elk
# Runs on your node(s) and forwards container metrics to Prometheus.
cadvisor:
image: google/cadvisor:v0.31.0
restart: always
labels:
container_group: monitoring
volumes:
- "/:/rootfs:ro"
- "/var/run:/var/run:rw"
- "/sys:/sys:ro"
- "/var/lib/docker/:/var/lib/docker:ro"
depends_on:
- logstash
environment:
VIRTUAL_HOST: "cadvisor.internal.poutineer.com"
logging:
<<: *logging-elk
# Runs on your node(s) and forwards node(host) metrics to Prometheus.
# Partially implemented: https://github.com/uschtwill/docker_monitoring_logging_alerting/pull/44
nodeexporter:
image: prom/node-exporter:v0.16.0
command:
- "--path.procfs=/host/proc"
- "--path.sysfs=/host/sys"
- '--collector.filesystem.ignored-mount-points="^/(sys|proc|dev|host|etc)($$|/)"'
labels:
container_group: monitoring
restart: unless-stopped
user: root
privileged: true
volumes:
- "/proc:/host/proc:ro"
- "/sys:/host/sys:ro"
- "/:/rootfs:ro"
depends_on:
- logstash
logging:
<<: *logging-elk
filebeat:
image: docker.elastic.co/beats/filebeat:6.4.2
command:
- "--strict.perms=false"
restart: always
user: root
labels:
container_group: monitoring
volumes:
- "./volumes/settings/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:rw"
- "/var/run/docker.sock:/var/run/docker.sock"
# This is needed for filebeat to load container log path as specified in filebeat.yml
- "/var/lib/docker/containers/:/var/lib/docker/containers/:ro"
# This is needed for filebeat to load logs for system and auth modules
- "/var/log/:/var/log/:ro"
depends_on:
- logstash
- elasticsearch
- kibana
# logging:
# <<: *logging-elk
metricbeat:
image: docker.elastic.co/beats/metricbeat:6.4.2
command:
- "--strict.perms=false"
- "-system.hostfs=/hostfs"
user: root
depends_on:
- elasticsearch
- kibana
volumes:
- "./volumes/settings/metricbeat/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:rw"
- "/proc:/hostfs/proc:ro"
- "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro"
- "/:/hostfs:ro"
- "/var/run/docker.sock:/var/run/docker.sock"
logging:
<<: *logging-elk
packetbeat:
image: docker.elastic.co/beats/packetbeat:6.4.2
labels:
container_group: monitoring
command:
- "--strict.perms=false"
user: root
privileged: true
depends_on:
- elasticsearch
- kibana
volumes:
- "./volumes/settings/packetbeat/packetbeat.yml:/usr/share/packetbeat/packetbeat.yml:rw"
- "/var/run/docker.sock:/var/run/docker.sock"
logging:
<<: *logging-elk
apm-server:
image: docker.elastic.co/apm/apm-server:6.4.2
labels:
container_group: monitoring
ports:
- "8200:8200"
- "8201:8200"
depends_on:
- elasticsearch
- kibana
volumes:
- "./volumes/settings/apm-server/apm-server.yml:/usr/share/apm-server/data/config/apm-server.yml:rw"
environment:
apm-server.host: 0.0.0.0
logging:
<<: *logging-elk
nginx-proxy:
image: jwilder/nginx-proxy:alpine
labels:
container_group: utility
ports:
- "80:80"
volumes:
- "./volumes/settings/nginx-proxy/:/etc/nginx/conf.d/:rw"
- "/var/run/docker.sock:/tmp/docker.sock:ro"
environment:
ENABLE_IPV6: "true"
logging:
<<: *logging-elk
networks:
outside:
# Extra-network is necessary to have a dedicated IP for Logstash to forwards log to.
ipam:
driver: default
config:
- subnet: 172.16.0.38/24
---
# Everything below is a separate file (filebeat.yml), concatenated into this
# gist after the compose definition; the `---` marks the document boundary.
# ========================== Modules configuration =============================
filebeat.modules:
  - module: system
    # Bare keys enable the syslog/auth filesets with their default settings.
    syslog:
    auth:
# ========================== Filebeat autodiscover ==============================
# Autodiscover allows you to detect changes in the system and spawn new modules or inputs as they happen.
# filebeat.autodiscover:
#   providers:
#     - type: docker
#       templates:
#         config:
#           - type: docker
#             containers.ids:
#               - "${data.docker.container.id}"
filebeat.autodiscover:
  providers:
    - type: docker
      templates:
        - condition:
            contains:
              docker.container.image: redis
          config:
            - module: redis
              log:
                enabled: true
                input:
                  type: docker
                  containers.ids:
                    - "${data.docker.container.id}"
              slowlog:
                enabled: true
                var.hosts: ["${data.host}:${data.port}"]
        - condition:
            contains:
              docker.container.image: postgres
          config:
            - module: postgresql
              log:
                enabled: true
        - condition:
            contains:
              # Fixed: original matched "postgres" here too (copy-paste error),
              # which would have applied the nginx module to postgres containers.
              docker.container.image: nginx
          config:
            - module: nginx
              access:
                enabled: true
              error:
                enabled: true
# =========================== Filebeat inputs ==================================
# ------------------------------ Docker input ----------------------------------
filebeat.inputs:
  - type: docker
    enabled: true
    containers.ids:
      - "*"
    paths:
      - /var/lib/docker/containers/${data.docker.container.id}/*.log
    # json.message_key: log
    # json.add_error_key: true
    # json.keys_under_root: true
    exclude_lines: ["^\\s+[\\-`('.|_]"] # drop asciiart lines
    # Join stack-trace continuation lines onto the preceding event.
    multiline.pattern: "^\t|^[[:space:]]+(at|...)|^Caused by:"
    multiline.match: after
    processors:
      - add_docker_metadata: ~
      - add_cloud_metadata: ~
      - add_locale: ~
# ------------------------------ Log input -------------------------------------
# - type: log
#   enabled: true
#   paths:
#     - '/var/lib/docker/containers/*/*.log'
#   json.message_key: log
#   json.add_error_key: true
#   json.keys_under_root: true
#   processors:
#     - add_docker_metadata: ~
#     - add_cloud_metadata: ~
#     - add_locale: ~
#   multiline.pattern: "^\t|^[[:space:]]+(at|...)|^Caused by:"
#   multiline.match: after
#
# - type: log
#   enabled: false
#   paths:
#     # path to jenkins build logs
#     - /var/lib/docker/volumes/jenkins_home/_data/jobs/*/builds/*/log
#   json.message_key: log
#   json.add_error_key: true
#   json.keys_under_root: true
#   multiline.pattern: '^[[:space:]]+|^Caused by:'
#   multiline.negate: false
#   multiline.match: after
# ========================== Elasticsearch output ===============================
output.elasticsearch:
  hosts: ["elasticsearch:9200"]
# ============================== Dashboards =====================================
setup.kibana:
  host: "kibana:5601"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment