# Telegraf Configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 10000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""
#
# OUTPUTS:
#
# Configuration for Amon Server to send metrics to.
[[outputs.amon]]
## Amon Server Key
server_key = "my-server-key" # required.
## Amon Instance URL
amon_instance = "https://youramoninstance" # required
## Connection timeout.
# timeout = "5s"
# Configuration for the AMQP server to send metrics to
[[outputs.amqp]]
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## Telegraf tag to use as a routing key
## ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
## InfluxDB retention policy
# retention_policy = "default"
## InfluxDB database
# database = "telegraf"
## InfluxDB precision
# precision = "s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# Configuration for AWS CloudWatch output.
[[outputs.cloudwatch]]
## Amazon REGION
region = "us-east-1"
## Namespace for the CloudWatch MetricDatums
namespace = "InfluxData/Telegraf"
# Configuration for DataDog API to send metrics to.
[[outputs.datadog]]
## Datadog API key
apikey = "my-secret-key" # required.
## Connection timeout.
# timeout = "5s"
# Send telegraf metrics to file(s)
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# Configuration for Graphite server to send metrics to
[[outputs.graphite]]
## TCP endpoint for your graphite instance.
servers = ["localhost:2003"]
## Prefix metrics name
prefix = ""
## timeout in seconds for the write connection to graphite
timeout = 2
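## Illustrative sketch: metrics go out as Graphite plaintext lines of the
## form "<metric.path> <value> <unix_timestamp>", e.g.
##   web01.cpu.usage_idle 98.2 1455320660
## (the exact path layout depends on the serializer settings above)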
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to in each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
## The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
## note: using "s" precision greatly improves InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
## URLs of kafka brokers
brokers = ["localhost:9092"]
## Kafka topic for producer messages
topic = "telegraf"
## Telegraf tag to use as a routing key
## ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# Configuration for the AWS Kinesis output.
[[outputs.kinesis]]
## Amazon REGION of kinesis endpoint.
region = "ap-southeast-2"
## Kinesis StreamName must exist prior to starting telegraf.
streamname = "StreamName"
## PartitionKey as used for sharding data.
partitionkey = "PartitionKey"
## Format of the Data payload in the kinesis PutRecord; supported values
## are "string" and "custom".
format = "string"
## debug will show upstream aws messages.
debug = false
# Configuration for Librato API to send metrics to.
[[outputs.librato]]
## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
## Librato API user
api_user = "telegraf@influxdb.com" # required.
## Librato API token
api_token = "my-secret-token" # required.
## Tag Field to populate source attribute (optional)
## This is typically the _hostname_ from which the metric was obtained.
source_tag = "hostname"
## Connection timeout.
# timeout = "5s"
# Configuration for MQTT server to send metrics to
[[outputs.mqtt]]
servers = ["localhost:1883"] # required.
## MQTT outputs send metrics to this topic format
## "<topic_prefix>/<hostname>/<pluginname>/"
## ex: prefix/web01.example.com/mem
topic_prefix = "telegraf"
## username and password to connect to the MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# Send telegraf measurements to NSQD
[[outputs.nsq]]
## Location of nsqd instance listening on TCP
server = "localhost:4150"
## NSQ topic for producer messages
topic = "telegraf"
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# Configuration for OpenTSDB server to send metrics to
[[outputs.opentsdb]]
## prefix for metrics keys
prefix = "my.specific.prefix."
## Telnet Mode ##
## DNS name of the OpenTSDB server in telnet mode
host = "opentsdb.example.com"
## Port of the OpenTSDB server in telnet mode
port = 4242
## Debug true - Prints OpenTSDB communication
debug = false
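## Illustrative sketch: in telnet mode each data point is sent as an
## OpenTSDB "put" line, e.g.
##   put my.specific.prefix.cpu.usage 1455320660 42.5 host=web01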
# Configuration for the Prometheus client to spawn
[[outputs.prometheus_client]]
## Address to listen on
# listen = ":9126"
# Configuration for the Riemann server to send metrics to
[[outputs.riemann]]
## URL of server
url = "localhost:5555"
## transport protocol to use: either tcp or udp
transport = "tcp"
## separator to use between input name and field name in Riemann service name
separator = " "
#
# INPUTS:
#
# Read stats from an aerospike server
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
## This plugin will query all namespaces the aerospike
## server has configured and get stats for them.
servers = ["localhost:3000"]
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of Apache status URIs to gather stats from.
urls = ["http://localhost/server-status?auto"]
# Read metrics of bcache from stats_total and dirty_data
[[inputs.bcache]]
## Bcache sets path
## If not specified, then default is:
bcachePath = "/sys/fs/bcache"
## By default, telegraf gathers stats for all bcache devices
## Setting devices will restrict the stats to the specified
## bcache devices.
bcacheDevs = ["bcache0"]
# Read CouchDB Stats from one or more servers
[[inputs.couchdb]]
## Works with CouchDB stats endpoints out of the box
## Multiple HOSTs from which to read CouchDB stats:
hosts = ["http://localhost:8086/_stats"]
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
# Read metrics from one or many disque servers
[[inputs.disque]]
## An array of URIs to gather stats about. Specify an ip or hostname
## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
## 10.0.0.1:10000, etc.
## If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Query the given DNS servers and gather statistics
[[inputs.dns_query]]
## servers to query
servers = ["8.8.8.8"] # required
## Domains or subdomains to query. "." (root) is the default
domains = ["."] # optional
## Query record type. Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS"
record_type = "A" # optional
## Dns server port. 53 is default
port = 53 # optional
## Query timeout in seconds. Default is 2 seconds
timeout = 2 # optional
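## Illustrative sketch: the settings above roughly correspond to running
##   dig @8.8.8.8 -p 53 . A
## and recording the query time for each server.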
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Only collect metrics for these containers, collect all if empty
container_names = []
# Read statistics from one or many dovecot servers
[[inputs.dovecot]]
## specify dovecot servers via an address:port list
## e.g.
## localhost:24242
##
## If no servers are specified, then localhost is used as the host.
servers = ["localhost:24242"]
## Only collect metrics for these domains, collect all if empty
domains = []
# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true
## set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false
# Read metrics from one or more commands that can output to stdout
[[inputs.exec]]
## Commands array
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
# Read metrics of haproxy, via socket or csv stats page
[[inputs.haproxy]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port. ie localhost, 10.10.3.33:1936, etc.
## If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
## Or you can also use a local socket (not working yet)
## servers = ["socket://run/haproxy/admin.sock"]
# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.httpjson]]
## NOTE: This plugin only reads numerical measurements; strings and booleans
## will be ignored.
## a name for the service being polled
name = "webserver_stats"
## URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
## HTTP method to use: GET or POST (case-sensitive)
method = "GET"
## List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]
## HTTP parameters (all values must be strings)
[inputs.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
## HTTP Header parameters (all values must be strings)
# [inputs.httpjson.headers]
# X-Auth-Token = "my-xauth-token"
# apiVersion = "v1"
# Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
[[inputs.influxdb]]
## Works with InfluxDB debug endpoints out of the box,
## but other services can use this format too.
## See the influxdb plugin's README for more details.
## Multiple URLs from which to read InfluxDB-formatted JSON
urls = [
"http://localhost:8086/debug/vars"
]
# Read JMX metrics through Jolokia
[[inputs.jolokia]]
## This is the context root used to compose the jolokia url
context = "/jolokia/read"
## List of servers exposing jolokia read service
[[inputs.jolokia.servers]]
name = "stable"
host = "192.168.103.2"
port = "8180"
# username = "myuser"
# password = "mypassword"
## List of metrics collected on above servers
## Each metric consists of a name, a jmx path, and either
## a pass or drop slice attribute.
## This collects all heap memory usage metrics.
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
## An array of URIs to gather stats about LeoFS.
## Specify an ip or hostname with port. ie 127.0.0.1:4020
servers = ["127.0.0.1:4021"]
# Read metrics from local Lustre service on OST, MDS
[[inputs.lustre2]]
## An array of /proc globs to search for Lustre stats
## If not specified, the default will work on Lustre 2.5.x
##
# ost_procfiles = [
# "/proc/fs/lustre/obdfilter/*/stats",
# "/proc/fs/lustre/osd-ldiskfs/*/stats"
# ]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# Gathers metrics from the /3.0/reports MailChimp API
[[inputs.mailchimp]]
## MailChimp API key
## get from https://admin.mailchimp.com/account/api/
api_key = "" # required
## Reports for campaigns sent more than days_old ago will not be collected.
## 0 means collect all.
days_old = 0
## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
# campaign_id = ""
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics from one or many memcached servers
[[inputs.memcached]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port. ie localhost, 10.0.0.1:11211, etc.
servers = ["localhost:11211"]
# unix_sockets = ["/var/run/memcached.sock"]
# Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]]
# Timeout, in ms.
timeout = 100
# A list of Mesos masters, default value is localhost:5050.
masters = ["localhost:5050"]
# Metric groups to be collected; by default, all are enabled.
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
## An array of URIs to gather stats about. Specify an ip or hostname
## with optional port and password. ie,
## mongodb://user:auth_key@10.10.3.30:27017,
## mongodb://10.10.3.33:18832,
## 10.0.0.1:10000, etc.
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers
[[inputs.mysql]]
## specify servers via a url matching:
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
## e.g.
## root:passwd@tcp(127.0.0.1:3306)/?tls=false
## root@tcp(127.0.0.1:3306)/?tls=false
##
## If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
# Read metrics about network interface usage
[[inputs.net]]
## By default, telegraf gathers stats from any up interface (excluding loopback)
## Setting interfaces will tell it to gather these explicit interfaces,
## regardless of status.
##
# interfaces = ["eth0"]
# TCP or UDP 'ping' a given url and collect the response time in seconds
[[inputs.net_response]]
## Protocol, must be "tcp" or "udp"
protocol = "tcp"
## Server address (default localhost)
address = "github.com:80"
## Set timeout (default 1.0 seconds)
timeout = 1.0
## Set read timeout (default 1.0 seconds)
read_timeout = 1.0
## Optional string sent to the server
# send = "ssh"
## Optional expected string in answer
# expect = "ssh"
# Read TCP metrics such as established, time wait and sockets counts.
[[inputs.netstat]]
# no configuration
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URIs to gather stats from.
urls = ["http://localhost/status"]
# Read NSQ topic and channel statistics.
[[inputs.nsq]]
## An array of NSQD HTTP API endpoints
endpoints = ["http://localhost:4151"]
# Read metrics of passenger using passenger-status
[[inputs.passenger]]
## Path of passenger-status.
##
## The plugin gathers metrics by parsing the XML output of passenger-status
## More information about the tool:
## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
##
## If no path is specified, then the plugin simply executes passenger-status,
## hoping it can be found in your PATH
command = "passenger-status -v --show=xml"
# Read metrics of phpfpm, via HTTP status page or socket
[[inputs.phpfpm]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port and path
##
## Plugin can be configured in three modes (either can be used):
## - http: the URL must start with http:// or https://, ie:
## "http://localhost/status"
## "http://192.168.130.1/status?full"
##
## - unixsocket: path to fpm socket, ie:
## "/var/run/php5-fpm.sock"
## or using a custom fpm status path:
## "/var/run/php5-fpm.sock:fpm-custom-status-path"
##
## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
## "fcgi://10.0.0.12:9000/status"
## "cgi://10.0.10.12:9001/status"
##
## Example of gathering from multiple sources: a local socket and a remote host
## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
urls = ["http://localhost/status"]
# Ping given url(s) and return statistics
[[inputs.ping]]
## NOTE: this plugin forks the ping command. You may need to set capabilities
## via setcap cap_net_raw+p /bin/ping
## urls to ping
urls = ["www.google.com"] # required
## number of pings to send (ping -c <COUNT>)
count = 1 # required
## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
## ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
## interface to send ping from (ping -I <INTERFACE>)
interface = ""
# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
## specify address via a url matching:
## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
## or a simple string:
## host=localhost user=pqotest password=... sslmode=... dbname=app_production
##
## All connection parameters are optional.
##
## Without the dbname parameter, the driver will default to a database
## with the same name as the user. This dbname is just for instantiating a
## connection with the server and doesn't restrict the databases we are trying
## to grab metrics for.
##
address = "host=localhost user=postgres sslmode=disable"
## A list of databases to pull metrics about. If not specified, metrics for all
## databases are gathered.
# databases = ["app_production", "testing"]
# Read metrics from one or many PowerDNS servers
[[inputs.powerdns]]
## An array of sockets to gather stats about.
## Specify a path to unix socket.
unix_sockets = ["/var/run/pdns.controlsocket"]
# Monitor process cpu and memory usage
[[inputs.procstat]]
## Must specify one of: pid_file, exe, or pattern
## PID file to monitor process
pid_file = "/var/run/nginx.pid"
## executable name (ie, pgrep <exe>)
# exe = "nginx"
## pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"
## user as argument for pgrep (ie, pgrep -u <user>)
# user = "nginx"
## Field name prefix
prefix = ""
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
# Reads the last_run_summary.yaml file and converts it to measurements
[[inputs.puppetagent]]
## Location of puppet last run summary file
location = "/var/lib/puppet/state/last_run_summary.yaml"
# Read metrics from one or many RabbitMQ servers via the management API
[[inputs.rabbitmq]]
url = "http://localhost:15672" # required
# name = "rmq-server-1" # optional tag
# username = "guest"
# password = "guest"
## A list of nodes to pull metrics about. If not specified, metrics for
## all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
[[inputs.raindrops]]
## An array of raindrops middleware URIs to gather stats from.
urls = ["http://localhost:8080/_raindrops"]
# Read metrics from one or many redis servers
[[inputs.redis]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## tcp://localhost:6379
## tcp://:password@192.168.99.100
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
## An array of URIs to gather stats about. Specify an ip or hostname
## with optional port and password. ie,
## rethinkdb://user:auth_key@10.10.3.30:28105,
## rethinkdb://10.10.3.33:18832,
## 10.0.0.1:10000, etc.
servers = ["127.0.0.1:28015"]
# Read metrics from one or many Riak servers
[[inputs.riak]]
# Specify a list of one or more riak http servers
servers = ["http://localhost:8098"]
# Reads OID values from one or many snmp agents
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "192.168.2.2:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# SNMP response timeout
timeout = 2.0 # default 2.0
# SNMP request retries
retries = 2 # default 2
# Which get/bulk requests to collect for this host
collect = ["mybulk", "sysservices", "sysdescr"]
# Simple list of OIDs to get, in addition to "collect"
get_oids = []
[[inputs.snmp.host]]
address = "192.168.2.3:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
collect = ["mybulk"]
get_oids = [
"ifNumber",
".1.3.6.1.2.1.1.3.0",
]
[[inputs.snmp.get]]
name = "ifnumber"
oid = "ifNumber"
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "0"
[[inputs.snmp.get]]
name = "sysuptime"
oid = ".1.3.6.1.2.1.1.3.0"
unit = "second"
[[inputs.snmp.bulk]]
name = "mybulk"
max_repetition = 127
oid = ".1.3.6.1.2.1.1"
[[inputs.snmp.bulk]]
name = "ifoutoctets"
max_repetition = 127
oid = "ifOutOctets"
[[inputs.snmp.host]]
address = "192.168.2.13:161"
#address = "127.0.0.1:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
#collect = ["mybulk", "sysservices", "sysdescr", "systype"]
collect = ["sysuptime" ]
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# SNMP TABLEs
# table with neither mapping nor subtables
[[inputs.snmp.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
# table without mapping but with subtables
[[inputs.snmp.table]]
name = "iftable2"
oid = ".1.3.6.1.2.1.31.1.1.1"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# table with both mapping and subtables
[[inputs.snmp.table]]
name = "iftable4"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# sub_tables need not be "real" subtables
sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# Read metrics from Microsoft SQL Server
[[inputs.sqlserver]]
## Specify instances to monitor with a list of connection strings.
## All connection parameters are optional.
## By default, the host is localhost, listening on default port, TCP 1433.
## For Windows, the user is the currently running AD user (SSO).
## See https://github.com/denisenkom/go-mssqldb for detailed connection
## parameters.
# servers = [
# "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# ]
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
# Inserts sine and cosine waves for demonstration purposes
[[inputs.trig]]
## Set the amplitude
amplitude = 10.0
# Read Twemproxy stats data
[[inputs.twemproxy]]
## Twemproxy stats address and port (no scheme)
addr = "localhost:22222"
## Monitor pool name
pools = ["redis_pool", "mc_pool"]
# Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats
[[inputs.zfs]]
## ZFS kstat path
## If not specified, then default is:
kstatPath = "/proc/spl/kstat/zfs"
## By default, telegraf gathers all zfs stats
## If not specified, then default is:
kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
## By default, don't gather zpool stats
poolMetrics = false
# Reads 'mntr' stats from one or many zookeeper servers
[[inputs.zookeeper]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with port. ie localhost:2181, 10.0.0.1:2181, etc.
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 2181 is used
servers = [":2181"]
#
# SERVICE INPUTS:
#
# A Github Webhook Event collector
[[inputs.github_webhooks]]
## Address and port to host Webhook listener on
service_address = ":1618"
# Read metrics from Kafka topic(s)
[[inputs.kafka_consumer]]
## topic(s) to consume
topics = ["telegraf"]
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
## the name of the consumer group
consumer_group = "telegraf_metricss"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
# Read metrics from MQTT topic(s)
[[inputs.mqtt_consumer]]
servers = ["localhost:1883"]
## MQTT QoS, must be 0, 1, or 2
qos = 0
## Topics to subscribe to
topics = [
"telegraf/host01/cpu",
"telegraf/+/mem",
"sensors/#",
]
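## Standard MQTT wildcards apply above: "+" matches exactly one topic level
## (e.g. telegraf/web01/mem), "#" matches any number of remaining levels.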
## username and password to connect MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
# Read metrics from NATS subject(s)
[[inputs.nats_consumer]]
## urls of NATS servers
servers = ["nats://localhost:4222"]
## Use Transport Layer Security
secure = false
## subject(s) to consume
subjects = ["te"]
## name a queue group
queue_group = "tel"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
# Statsd Server
[[inputs.statsd]]
## Address and port to host UDP listener on
service_address = ":8125"
## Delete gauges every interval (default=false)
delete_gauges = false
## Delete counters every interval (default=false)
delete_counters = false
## Delete sets every interval (default=false)
delete_sets = false
## Delete timings & histograms every interval (default=true)
delete_timings = true
## Percentiles to calculate for timing & histogram stats
percentiles = [90]
## convert "." in measurement names to "_" and "-" to "__"
convert_names = true
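## For illustration: with convert_names = true, a statsd name like
## "web.app-1.requests" becomes the measurement "web_app__1_requests".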