Skip to content

Instantly share code, notes, and snippets.

@eduardosramos
Last active August 5, 2022 11:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save eduardosramos/fb5ba1b1a5358d0229fc28ac9365d054 to your computer and use it in GitHub Desktop.
Monitoramento (Monitoring) — Nomad job spec and agent configuration for a Prometheus / Grafana / Alertmanager / cAdvisor / node-exporter stack.
job "metrics" {
datacenters = ["dc1"]
# ---------------------------------------------------------------------------
# Prometheus: scrapes the exporters below and evaluates the alert rules
# shipped in the second template.
# ---------------------------------------------------------------------------
group "prometheus" {
  # Persist the TSDB on a client host volume (declared in the agent config).
  volume "prometheus" {
    type      = "host"
    read_only = false
    source    = "prometheus"
  }

  network {
    mode = "bridge"
    port "prometheus" {
      static = 9090
      to     = 9090
    }
  }

  # Registered with a Consul Connect sidecar so other groups (e.g. Grafana)
  # can reach Prometheus over the service mesh.
  service {
    name = "prometheus"
    port = "prometheus"
    connect {
      sidecar_service {}
    }
  }

  task "web" {
    driver = "docker"

    # Main Prometheus configuration.
    # change_mode = "noop": a template re-render does not restart the task.
    template {
      change_mode = "noop"
      destination = "local/prometheus.yml"
      data        = <<EOH
global:
  scrape_interval: 15s
  evaluation_interval: 15s
  external_labels:
    monitor: 'odontoprev'

rule_files:
  - 'alert.rules'

alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets:
            - "alertmanager:9093"

scrape_configs:
  - job_name: 'prometheus'
    scrape_interval: 15s
    static_configs:
      # NOTE(review): hard-coded client address — confirm it matches the host
      # running this group, or render it from the Nomad environment instead.
      - targets: ['192.168.1.11:9090']

  # NOTE(review): dns_sd_configs with 'tasks.<name>' is Docker-Swarm-style
  # service discovery; confirm these names resolve in this cluster's DNS.
  - job_name: 'cadvisor'
    scrape_interval: 15s
    dns_sd_configs:
      - names:
          - 'tasks.cadvisor'
        type: 'A'
        port: 8080  # was 8088, but the cadvisor group publishes port 8080

  - job_name: 'node-exporter'
    scrape_interval: 15s
    dns_sd_configs:
      - names:
          - 'tasks.node-exporter'
        type: 'A'
        port: 9100
EOH
    }

    # Alert rules referenced by rule_files above.
    template {
      change_mode = "noop"
      destination = "local/alert.rules"
      data        = <<EOH
groups:
  - name: odonto
    rules:
      - alert: service_down
        expr: up == 0
        for: 2m
        labels:
          severity: page
EOH
    }

    volume_mount {
      volume      = "prometheus"
      destination = "/prometheus"
      read_only   = false
    }

    config {
      image = "prom/prometheus:latest"
      volumes = [
        "local/prometheus.yml:/etc/prometheus/prometheus.yml",
        "local/alert.rules:/etc/prometheus/alert.rules",
      ]
      ports = ["prometheus"]
    }
  }
}
# ---------------------------------------------------------------------------
# Grafana: dashboards; reaches Prometheus through a Connect upstream
# bound locally on 127.0.0.1:9090 inside the group's network namespace.
# ---------------------------------------------------------------------------
group "grafana" {
  # Persist dashboards, users and plugins across restarts.
  volume "grafana" {
    type      = "host"
    read_only = false
    source    = "grafana"
  }

  network {
    mode = "bridge"
    port "http" {
      static = 3000
      to     = 3000
    }
  }

  service {
    name = "grafana"
    # Reference the port by its label (was the literal string "3000"),
    # consistent with every other service in this job.
    port = "http"
    connect {
      sidecar_service {
        proxy {
          upstreams {
            destination_name = "prometheus"
            local_bind_port  = 9090
          }
        }
      }
    }
  }

  task "dashboard" {
    driver = "docker"

    volume_mount {
      volume      = "grafana"
      destination = "/var/lib/grafana"
      read_only   = false
    }

    config {
      image = "grafana/grafana:latest"
    }
  }
}
# ---------------------------------------------------------------------------
# Alertmanager: receives alerts fired by Prometheus' alert.rules.
# Ports changed 9393 -> 9093: the prom/alertmanager image listens on 9093
# by default, and the Prometheus alerting config already targets
# "alertmanager:9093" — with 9393 the two could never talk.
# ---------------------------------------------------------------------------
group "alertmanager" {
  volume "alertmanager" {
    type      = "host"
    read_only = false
    source    = "alertmanager"
  }

  network {
    mode = "bridge"
    port "alertmanager" {
      static = 9093
      to     = 9093
    }
  }

  service {
    name = "alertmanager"
    port = "alertmanager" # port label, consistent with the other services
    connect {
      # The original declared an upstream to "alertmanager" itself; a
      # self-referential upstream serves no purpose and would bind an extra
      # listener inside the same network namespace, so it was dropped.
      sidecar_service {}
    }
  }

  task "alertmanager" {
    driver = "docker"

    template {
      change_mode = "noop"
      destination = "local/alertmanager.yml"
      data        = <<EOH
---
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'localhost:25'
  smtp_from: 'alertmanager@example.org'
  smtp_auth_username: 'alertmanager'
  # NOTE(review): plaintext credential in the job file — source this from
  # Vault or an environment variable before using a real password.
  smtp_auth_password: 'password'

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'

# The root route on which each incoming alert enters.
route:
  group_by: ['alertname', 'cluster', 'service']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 3h
  # A default receiver
  receiver: team-X-mails

receivers:
  - name: 'team-X-mails'
    email_configs:
      - to: 'team-X+alerts@example.org'
EOH
    }

    volume_mount {
      volume      = "alertmanager"
      destination = "/etc/alertmanager"
      read_only   = false
    }

    config {
      image = "prom/alertmanager:latest"
      volumes = [
        "local/alertmanager.yml:/etc/alertmanager/alertmanager.yml",
      ]
      ports = ["alertmanager"]
    }
  }
}
# ---------------------------------------------------------------------------
# cAdvisor: per-container resource metrics, scraped by Prometheus.
# ---------------------------------------------------------------------------
group "cadvisor" {
  network {
    port "cadvisor" {
      static = 8080
      to     = 8080
    }
  }

  service {
    name = "cadvisor"
    port = "cadvisor"

    check {
      type     = "http"
      path     = "/"
      interval = "10s"
      timeout  = "2s"
    }
  }

  task "cadvisor" {
    driver = "docker"

    config {
      image = "gcr.io/cadvisor/cadvisor:latest"
      # Read-only mounts of the host filesystem so cAdvisor can inspect
      # containers; /var/run stays writable for the Docker socket.
      volumes = [
        "/:/rootfs:ro",
        "/var/run:/var/run:rw",
        "/sys:/sys:ro",
        "/var/lib/docker/:/var/lib/docker:ro",
        "/etc/localtime:/etc/localtime:ro",
      ]
      ports = ["cadvisor"]
    }

    resources {
      cpu    = 500 # MHz
      memory = 256 # MB (the original comment said 512M, but the value is 256)
    }
  }
}
# ---------------------------------------------------------------------------
# node-exporter: host-level metrics (CPU, memory, disk, network).
# ---------------------------------------------------------------------------
group "node-exporter" {
  network {
    port "node-exporter" {
      static = 9100
      to     = 9100
    }
  }

  service {
    name = "node-exporter"
    port = "node-exporter"

    check {
      type     = "http"
      path     = "/"
      interval = "10s"
      timeout  = "2s"
    }
  }

  task "node-exporter" {
    driver = "docker"

    config {
      image = "prom/node-exporter:latest"
      # Host pseudo-filesystems mounted read-only for metric collection.
      volumes = [
        "/proc:/host/proc:ro",
        "/sys:/host/sys:ro",
        "/:/rootfs:ro",
      ]
      ports = ["node-exporter"]
    }

    resources {
      cpu    = 500 # MHz
      memory = 256 # MB (the original comment said 512M, but the value is 256)
    }
  }
}
}
# ===========================================================================
# Nomad agent configuration (a separate file from the job spec above):
# a single node running as both server and client.
# ===========================================================================
data_dir   = "/opt/nomad/data"
bind_addr  = "0.0.0.0" # listen on all interfaces
plugin_dir = "/var/nomad/plugins"

server {
  enabled          = true
  bootstrap_expect = 1 # single-server cluster
}

client {
  enabled = true
  servers = ["127.0.0.1"]

  # Host volumes backing the job's persistent state.
  host_volume "alertmanager" {
    path      = "/opt/alertmanager"
    read_only = false
  }

  host_volume "prometheus" {
    path      = "/opt/prometheus"
    read_only = false
  }

  host_volume "grafana" {
    path      = "/opt/grafana_data"
    read_only = false
  }

  # NOTE(review): legacy client options — on current Nomad versions the
  # docker.* keys here are superseded by the plugin "docker" block; kept
  # for compatibility with older agents.
  options {
    "docker.caps.whitelist"          = "ALL"
    "docker.volumes.enabled"         = "True"
    "docker.config.allow_privileged" = "True"
  }
}
# Allow Docker tasks to bind-mount arbitrary host paths (used by the job's
# template and /proc//sys/rootfs mounts).
plugin "docker" {
  config {
    volumes {
      enabled = true
    }
  }
}

# Podman driver: socket path and volume support merged into a single block.
# The original declared plugin "nomad-driver-podman" twice; the later
# declaration risks overriding the earlier one and losing the volumes config.
plugin "nomad-driver-podman" {
  config {
    socket_path = "unix:///var/run/podman.sock"
    volumes {
      enabled      = true
      selinuxlabel = "z"
    }
  }
}

plugin "raw_exec" {
  config {
    enabled = true
  }
}

telemetry {
  collection_interval = "10s"
}

ports {
  http = 4646 # Nomad HTTP API / UI
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment