# The write_relabel_configs entries at the bottom of this file do the
# metric filtering (dropping node_memory_* series) per the remote-write docs.
# my global config
global:
  # How frequently to scrape targets by default. Prometheus's own default is 1m.
  scrape_interval: 60s
  # How frequently to evaluate rules. Prometheus's own default is 1m.
  evaluation_interval: 60s
  # scrape_timeout is left at the global default (10s).
# A scrape configuration containing exactly one endpoint to scrape.
# NOTE(review): port 9100 is conventionally node_exporter, not Prometheus
# itself (9090) — confirm which service is actually listening there.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries
  # scraped from this config.
  - job_name: 'my-local-pi'
    # Same as the global interval; stated explicitly for clarity.
    scrape_interval: 60s
    static_configs:
      - targets: ['localhost:9100']
# Forward scraped samples to New Relic's Prometheus remote-write endpoint.
remote_write:
  - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=raspberry-pi
    # NOTE(review): never commit a real token to version control — inject it
    # via a secrets mechanism and keep this placeholder in the repo.
    bearer_token: <redacted>
    write_relabel_configs:
      # Drop every node_memory_* series before it is sent upstream.
      # Matching on __name__ alone is sufficient and states the intent:
      # the original also listed 'instance', but with the default ';'
      # separator the trailing '.*' absorbed it, so the match condition
      # is unchanged.
      - source_labels: ['__name__']
        regex: 'node_memory_.*'
        action: 'drop'