# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}");
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}).
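#
# Illustrative example (INFLUX_URL, BATCH_SIZE and ROUND_INTERVAL are
# hypothetical variables you would export in Telegraf's environment):
#   urls = ["${INFLUX_URL}"]            # string: quoted
#   metric_batch_size = ${BATCH_SIZE}   # number: unquoted
#   round_interval = ${ROUND_INTERVAL}  # boolean: unquoted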
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for the telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. The maximum flush interval will be
## flush_interval + flush_jitter.
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
##     when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log file name; the empty string means to log to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to; if there is no log activity, rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep; any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Override default hostname; if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster; only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
urls = ["http://influxdb:8086"]
## The target database for metrics; will be created as needed.
## For a UDP url endpoint the database needs to be configured on the server side.
database = "telegraf"
## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""
## If true, the database tag will not be added to the metric.
# exclude_database_tag = false
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
skip_database_creation = true
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
timeout = "5s"
## HTTP Basic Auth
username = "telegraf"
password = "telegrafpass"
## HTTP User-Agent
user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override; if unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
# ## Connection timeout.
# # timeout = "5s"
# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
# ## Broker to publish to.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to publish to. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
# brokers = ["amqp://localhost:5672/influxdb"]
#
# ## Maximum messages to send over a connection. Once this is reached, the
# ## connection is closed and a new connection is made. This can be helpful for
# ## load balancing when not using a dedicated load balancer.
# # max_messages = 0
#
# ## Exchange to declare and publish to.
# exchange = "telegraf"
#
# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
# # exchange_passive = false
#
# ## Exchange durability can be either "transient" or "durable".
# # exchange_durability = "durable"
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
# # exchange_arguments = {"hash_property" = "timestamp"}
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
# # password = ""
#
# ## Auth method. PLAIN and EXTERNAL are supported.
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Metric tag to use as a routing key.
# ## ie, if this tag exists, its value will be used as the routing key
# # routing_tag = "host"
#
# ## Static routing key. Used when no routing_tag is set or as a fallback
# ## when the tag specified in routing_tag is not found.
# # routing_key = ""
# # routing_key = "telegraf"
#
# ## Delivery Mode controls if a published message is persistent.
# ## One of "transient" or "persistent".
# # delivery_mode = "transient"
#
# ## InfluxDB database added as a message header.
# ## deprecated in 1.7; use the headers option
# # database = "telegraf"
#
# ## InfluxDB retention policy added as a message header
# ## deprecated in 1.7; use the headers option
# # retention_policy = "default"
#
# ## Static headers added to each published message.
# # headers = { }
# # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
# ## Connection timeout. If not provided, will default to 5s. 0s means no
# ## timeout (not recommended).
# # timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## If true, use the batch serialization format instead of line based delimiting.
# ## Only applies to data formats which are not line based, such as JSON.
# ## Recommended to set to true.
# # use_batch_format = false
#
# ## Content encoding for message payloads, can be set to "gzip" to
# ## compress the body or "identity" to apply no encoding.
# ##
# ## Please note that when use_batch_format = false each amqp message contains only
# ## a single metric; it is recommended to use compression with batch format
# ## for best results.
# # content_encoding = "identity"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
# ## Instrumentation key of the Application Insights resource.
# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
# ## Timeout for closing (default: 5s).
# # timeout = "5s"
#
# ## Enable additional diagnostic logging.
# # enable_diagnostic_logging = false
#
# ## Context Tag Sources add Application Insights context tags to a tag value.
# ##
# ## For a list of allowed context tag keys see:
# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# # [outputs.application_insights.context_tag_sources]
# # "ai.cloud.role" = "kubernetes_container_name"
# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
# # Send aggregate metrics to Azure Monitor
# [[outputs.azure_monitor]]
# ## Timeout for HTTP writes.
# # timeout = "20s"
#
# ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
# # namespace_prefix = "Telegraf/"
#
# ## Azure Monitor doesn't have a string value type, so convert string
# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
# ## a maximum of 10 dimensions so Telegraf will only send the first 10
# ## alphanumeric dimensions.
# # strings_as_dimensions = false
#
# ## Both region and resource_id must be set or be available via the
# ## Instance Metadata service on Azure Virtual Machines.
# #
# ## Azure Region to publish metrics against.
# ## ex: region = "southcentralus"
# # region = ""
# #
# ## The Azure Resource ID against which metrics will be logged.
# ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
# # resource_id = ""
#
# ## Optionally, if in Azure US Government, China, or another sovereign
# ## cloud environment, set the appropriate REST endpoint for receiving
# ## metrics. (Note: region may be unused in this context)
# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
# ## Required. Name of the Google Cloud Platform (GCP) Project that owns
# ## the given PubSub topic.
# project = "my-project"
#
# ## Required. Name of the PubSub topic to publish metrics to.
# topic = "my-topic"
#
# ## Required. Data format to consume.
# ## Each data format has its own unique set of configuration options.
# ## Read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
# ## Application Default Credentials, which is preferred.
# # credentials_file = "path/to/my/creds.json"
#
# ## Optional. If true, will send all metrics per write in one PubSub message.
# # send_batched = true
#
# ## The following publish_* parameters specifically configure batching
# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when it has this many PubSub messages. If send_batched is true,
# ## this is ignored and treated as if it were 1.
# # publish_count_threshold = 1000
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when it has this many bytes of PubSub messages. If send_batched is true,
# ## this is ignored and treated as if it were 1.
# # publish_byte_threshold = 1000000
#
# ## Optional. Specifically configures requests made to the PubSub API.
# # publish_num_go_routines = 2
#
# ## Optional. Specifies a timeout for requests to the PubSub API.
# # publish_timeout = "30s"
#
# ## Optional. If true, published PubSub message data will be base64-encoded.
# # base64_data = false
#
# ## Optional. PubSub attributes to add to metrics.
# # [[outputs.cloud_pubsub.attributes]]
# # my_attr = "tag_value"
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Endpoint to make request against; the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
# ## default.
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = "InfluxData/Telegraf"
#
# ## If you have a large number of metrics, consider sending statistic
# ## values instead of raw metrics; this can both improve performance and
# ## save on AWS API cost. If this flag is enabled, the plugin parses the required
# ## CloudWatch statistic fields (count, min, max, and sum) and sends them to CloudWatch.
# ## You can use the basicstats aggregator to calculate those fields. If not all
# ## statistic fields are available, all fields are still sent as raw metrics.
# # write_statistics = false
# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
# # A github.com/jackc/pgx connection string.
# # See https://godoc.org/github.com/jackc/pgx#ParseDSN
# url = "postgres://user:password@localhost/schema?sslmode=disable"
# # Timeout for all CrateDB queries.
# timeout = "5s"
# # Name of the table to store metrics in.
# table = "metrics"
# # If true, and the metrics table does not exist, create it automatically.
# table_create = true
# # Configuration for the DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key" # required.
#
# # The base endpoint URL can optionally be specified but it defaults to:
# #url = "https://app.datadoghq.com/api/v1/series"
#
# ## Connection timeout.
# # timeout = "5s"
# # Send metrics to nowhere at all
# [[outputs.discard]]
# # no configuration
# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
# ## The full HTTP endpoint URL for your Elasticsearch instance.
# ## Multiple urls can be specified as part of the same cluster;
# ## this means that only ONE of the urls will be written to each interval.
# urls = [ "http://node1.es.example.com:9200" ] # required.
# ## Elasticsearch client timeout, defaults to "5s" if not set.
# timeout = "5s"
# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
# ## so it is not necessary to list all nodes in the urls config option.
# enable_sniffer = false
# ## Set the interval to check if the Elasticsearch nodes are available.
# ## Setting to "0s" will disable the health check (not recommended in production).
# health_check_interval = "10s"
# ## HTTP basic authentication details
# # username = "telegraf"
# # password = "mypassword"
#
# ## Index Config
# ## The target index for metrics (Elasticsearch will create it if it does not exist).
# ## You can use the date specifiers below to create indexes per time frame.
# ## The metric timestamp will be used to decide the destination index name.
# # %Y - year (2016)
# # %y - last two digits of year (00..99)
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# # %V - week of the year (ISO week) (01..53)
# ## Additionally, you can specify a tag name using the notation {{tag_name}}
# ## which will be used as part of the index name. If the tag does not exist,
# ## the default tag value will be used.
# # index_name = "telegraf-{{host}}-%Y.%m.%d"
# # default_tag_value = "none"
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template.
# ## If enabled it will create a recommended index template for telegraf indexes.
# manage_template = true
# ## The template name used for telegraf indexes
# template_name = "telegraf"
# ## Set to true if you want telegraf to overwrite an existing template
# overwrite_template = false
# # Send metrics to a command as input over stdin
# [[outputs.exec]]
# ## Command to ingest metrics via stdin.
# command = ["tee", "-a", "/dev/null"]
#
# ## Timeout for command to complete.
# # timeout = "5s"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to; "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## The file will be rotated after the time interval specified. When set
# ## to 0 no time based rotation is performed.
# # rotation_interval = "0d"
#
# ## The file will be rotated when it becomes larger than the specified
# ## size. When set to 0 no size based rotation is performed.
# # rotation_max_size = "0MB"
#
# ## Maximum number of rotated archives to keep; any older logs are deleted.
# ## If set to -1, no archives are removed.
# # rotation_max_archives = 5
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for the Graphite server to send metrics to
# [[outputs.graphite]]
# ## TCP endpoint for your graphite instance.
# ## If multiple endpoints are configured, output will be load balanced.
# ## Only one of the endpoints will be written to with each iteration.
# servers = ["localhost:2003"]
# ## Prefix metrics name
# prefix = ""
# ## Graphite output template
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
#
# ## Enable Graphite tags support
# # graphite_tag_support = false
#
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
# # Configurable HTTP health check resource based on metrics
# [[outputs.health]]
# ## Address and port to listen on.
# ## ex: service_address = "http://localhost:8080"
# ##     service_address = "unix:///var/run/telegraf-health.sock"
# # service_address = "http://:8080"
#
# ## The maximum duration for reading the entire request.
# # read_timeout = "5s"
# ## The maximum duration for writing the entire response.
# # write_timeout = "5s"
#
# ## Username and password to accept for HTTP basic authentication.
# # basic_username = "user1"
# # basic_password = "secret"
#
# ## Allowed CA certificates for client certificates.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## TLS server certificate and private key.
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## One or more check sub-tables should be defined; it is also recommended to
# ## use metric filtering to limit the metrics that flow into this output.
# ##
# ## When using the default buffer sizes, this example will fail when the
# ## metric buffer is half full.
# ##
# ## namepass = ["internal_write"]
# ## tagpass = { output = ["influxdb"] }
# ##
# ## [[outputs.health.compares]]
# ##   field = "buffer_size"
# ##   lt = 5000.0
# ##
# ## [[outputs.health.contains]]
# ##   field = "buffer_size"
# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
# ## URL is the address to send metrics to
# url = "http://127.0.0.1:8080/telegraf"
#
# ## Timeout for HTTP message
# # timeout = "5s"
#
# ## HTTP method, one of: "POST" or "PUT"
# # method = "POST"
#
# ## HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## OAuth2 Client Credentials Grant
# # client_id = "clientid"
# # client_secret = "secret"
# # token_url = "https://identityprovider/oauth2/v1/token"
# # scopes = ["urn:opc:idm:__myscopes__"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Additional HTTP headers
# # [outputs.http.headers]
# # # Should be set manually to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
# ## The URLs of the InfluxDB cluster nodes.
# ##
# ## Multiple URLs can be specified for a single cluster; only ONE of the
# ## urls will be written to each interval.
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.
# token = ""
#
# ## Organization is the name of the organization you wish to write to; must exist.
# organization = ""
#
# ## Destination bucket to write into.
# bucket = ""
#
# ## The value of this tag will be used to determine the bucket. If this
# ## tag is not set the 'bucket' option is used as the default.
# # bucket_tag = ""
#
# ## If true, the bucket tag will not be added to the metric.
# # exclude_bucket_tag = false
#
# ## Timeout for HTTP messages.
# # timeout = "5s"
#
# ## Additional HTTP headers
# # http_headers = {"X-Special-Header" = "Special-Value"}
#
# ## HTTP Proxy override; if unset, the standard proxy environment
# ## variables are consulted to determine which proxy, if any, should be used.
# # http_proxy = "http://corporate.proxy:3128"
#
# ## HTTP User-Agent
# # user_agent = "telegraf"
#
# ## Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "gzip"
#
# ## Enable or disable uint support for writing uints to InfluxDB 2.0.
# # influx_uint_support = false
#
# ## Optional TLS Config for use on HTTP connections.
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
# api_token = "API Token" # required
# ## Prefix the metrics with a given name
# prefix = ""
# ## Stats output template (Graphite formatting)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
# ## Display communication with Instrumental
# debug = false
# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
# ## URLs of kafka brokers
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
#
# ## Optional Client id
# # client_id = "Telegraf"
#
# ## Set the minimal supported Kafka version. Setting this enables the use of new
# ## Kafka features and APIs. Of particular interest, lz4 compression
# ## requires at least version 0.10.0.0.
# ## ex: version = "1.1.0"
# # version = ""
#
# ## Optional topic suffix configuration.
# ## If the section is omitted, no suffix is used.
# ## The following topic suffix methods are supported:
# ##   measurement - suffix equals the separator + the measurement's name
# ##   tags        - suffix equals the separator + the specified tags' values
# ##                 interleaved with the separator
#
# ## Suffix equals "_" + measurement name
# # [outputs.kafka.topic_suffix]
# # method = "measurement"
# # separator = "_"
#
# ## Suffix equals "__" + measurement's "foo" tag value.
# ## If the tag is missing, the suffix is an empty string.
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo"]
# # separator = "__"
#
# ## Suffix equals "_" + measurement's "foo" and "bar"
# ## tag values, separated by "_". If a tag is missing,
# ## its value is treated as an empty string.
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo", "bar"]
# # separator = "_"
#
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## Static routing key. Used when no routing_tag is set or as a fallback
# ## when the tag specified in routing_tag is not found. If set to "random",
# ## a random value will be generated for each message.
# ## ex: routing_key = "random"
# ##     routing_key = "telegraf"
# # routing_key = ""
#
# ## CompressionCodec represents the various compression codecs recognized by
# ## Kafka in messages.
# ##   0 : No compression
# ##   1 : Gzip compression
# ##   2 : Snappy compression
# ##   3 : LZ4 compression
# # compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
# ##   0 : the producer never waits for an acknowledgement from the broker.
# ##       This option provides the lowest latency but the weakest durability
# ##       guarantees (some data will be lost when a server fails).
# ##   1 : the producer gets an acknowledgement after the leader replica has
# ##       received the data. This option provides better durability as the
# ##       client waits until the server acknowledges the request as successful
# ##       (only messages that were written to the now-dead leader but not yet
# ##       replicated will be lost).
# ##   -1: the producer gets an acknowledgement after all in-sync replicas have
# ##       received the data. This option provides the best durability; we
# ##       guarantee that no messages will be lost as long as at least one
# ##       in-sync replica remains.
# # required_acks = -1
#
# ## The maximum number of times to retry sending a metric before failing
# ## until the next flush.
# # max_retry = 3
#
# ## The maximum permitted size of a message. Should be set equal to or
# ## smaller than the broker's 'message.max.bytes'.
# # max_message_bytes = 1000000
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
# ## Amazon REGION of the kinesis endpoint.
# region = "ap-southeast-2"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Endpoint to make request against; the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
# ## default.
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## The Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## DEPRECATED: If set, the partitionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
# # [outputs.kinesis.partition]
# # method = "static"
# # key = "howdy"
# #
# ## Use a random partition key on each write:
# # [outputs.kinesis.partition]
# # method = "random"
# #
# ## Use the measurement name as the partition key:
# # [outputs.kinesis.partition]
# # method = "measurement"
# #
# ## Use the value of a tag for all writes; if the tag is not set, the
# ## 'default' value will be used. When no default is given, it defaults to "telegraf".
# # [outputs.kinesis.partition]
# # method = "tag"
# # key = "host"
# # default = "mykey"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
#
# ## debug will show upstream aws messages.
# debug = false
# # Configuration for the Librato API to send metrics to.
# [[outputs.librato]]
# ## Librato API Docs
# ## http://dev.librato.com/v1/metrics-authentication
# ## Librato API user
# api_user = "telegraf@influxdb.com" # required.
# ## Librato API token
# api_token = "my-secret-token" # required.
# ## Debug
# # debug = false
# ## Connection timeout.
# # timeout = "5s"
# ## Output source Template (same as graphite buckets)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# ## This template is used in librato's source (not the metric's name)
# template = "host"
#
# # Configuration for the MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
#
# ## MQTT outputs send metrics to this topic format
# ## "<topic_prefix>/<hostname>/<pluginname>/"
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
# ## QoS policy for messages
# ##   0 = at most once
# ##   1 = at least once
# ##   2 = exactly once
# # qos = 2
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## client ID; if not set a random ID is generated
# # client_id = ""
#
# ## Timeout for write operations. default: 5s
# # timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
# ## metrics are written one metric per MQTT message.
# # batch = false
#
# ## When true, metrics will have the RETAIN flag set, making the broker cache
# ## entries until someone actually reads them.
# # retain = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Use Transport Layer Security
# # secure = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
# server = "localhost:4150"
# ## NSQ topic for producer messages
# topic = "telegraf"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for the OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
# ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in HTTP requests.
# ## Not used with the telnet API.
# http_batch_size = 50
#
# ## URI path for HTTP requests to OpenTSDB.
# ## Used in cases where OpenTSDB is located behind a reverse proxy.
# http_path = "/api/put"
#
# ## Debug true - prints OpenTSDB communication
# debug = false
#
# ## Separator separates measurement name from field
# separator = "_"
# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# listen = ":9273"
#
# ## Use HTTP Basic Authentication.
# # basic_username = "Foo"
# # basic_password = "Bar"
#
# ## If set, the IP ranges which are allowed to access metrics.
# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
# # ip_range = []
#
# ## Path to publish the metrics on.
# # path = "/metrics"
#
# ## Expiration interval for each metric. 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to exclude; valid entries are "gocollector" and "process".
# ## If unset, both collectors are enabled.
# # collectors_exclude = ["gocollector", "process"]
#
# ## Send string metrics as Prometheus labels.
# ## Unless set to false all string metrics will be sent as labels.
# # string_as_label = true
#
# ## If set, enable TLS with the given certificate.
# # tls_cert = "/etc/ssl/telegraf.crt"
# # tls_key = "/etc/ssl/telegraf.key"
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Export metric collection time.
# # export_timestamp = false
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
# ## The full TCP or UDP URL of the Riemann server
# url = "tcp://localhost:5555"
#
# ## Riemann event TTL, floating-point time in seconds.
# ## Defines how long an event is considered valid in Riemann.
# # ttl = 30.0
#
# ## Separator to use between the measurement and field name in the Riemann service name.
# ## This does not have any effect if 'measurement_as_attribute' is set to 'true'.
# separator = "/"
#
# ## Set the measurement name as the Riemann attribute 'measurement', instead of prepending it to the Riemann service name.
# # measurement_as_attribute = false
#
# ## Send string metrics as Riemann event states.
# ## Unless enabled, all string metrics will be ignored.
# # string_as_state = false
#
# ## A list of tag keys whose values get sent as Riemann tags.
# ## If empty, all Telegraf tag values will be sent as tags.
# # tag_keys = ["telegraf","custom_tag"]
#
# ## Additional Riemann tags to send.
# # tags = ["telegraf-output"]
#
# ## Description for Riemann event
# # description_text = "metrics collected from telegraf"
#
# ## Riemann client write timeout, defaults to "5s" if not set.
# # timeout = "5s"
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
# ## URL of server
# url = "localhost:5555"
# ## transport protocol to use, either tcp or udp
# transport = "tcp"
# ## separator to use between input name and field name in the Riemann service name
# separator = " "
# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
# # address = "tcp://127.0.0.1:8094"
# # address = "tcp://example.com:http"
# # address = "tcp4://127.0.0.1:8094"
# # address = "tcp6://127.0.0.1:8094"
# # address = "tcp6://[2001:db8::1]:8094"
# # address = "udp://127.0.0.1:8094"
# # address = "udp4://127.0.0.1:8094"
# # address = "udp6://127.0.0.1:8094"
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Configuration for Google Cloud Stackdriver to send metrics to
# [[outputs.stackdriver]]
# ## GCP Project
# project = "erudite-bloom-151019"
#
# ## The namespace for the metric descriptor
# namespace = "telegraf"
#
# ## Custom resource type
# # resource_type = "generic_node"
#
# ## Additional resource labels
# # [outputs.stackdriver.resource_labels]
# # node_id = "$HOSTNAME"
# # namespace = "myapp"
# # location = "eu-north0"
# # Configuration for the Syslog server to send metrics to
# [[outputs.syslog]]
# ## URL to connect to
# ## ex: address = "tcp://127.0.0.1:8094"
# ## ex: address = "tcp4://127.0.0.1:8094"
# ## ex: address = "tcp6://127.0.0.1:8094"
# ## ex: address = "tcp6://[2001:db8::1]:8094"
# ## ex: address = "udp://127.0.0.1:8094"
# ## ex: address = "udp4://127.0.0.1:8094"
# ## ex: address = "udp6://127.0.0.1:8094"
# address = "tcp://127.0.0.1:8094"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## The framing technique with which messages are expected to be
# ## transported (default = "octet-counting"): either the octet-counting
# ## technique (RFC5425#section-4.3.1, RFC6587#section-3.4.1) or the
# ## non-transparent framing technique (RFC6587#section-3.4.2). Must
# ## be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
# ## Must be one of "LF", or "NUL".
# # trailer = "LF"
#
# ## SD-PARAMs settings
# ## Syslog messages can contain key/value pairs within zero or more
# ## structured data sections. For each unrecognised metric tag/field an
# ## SD-PARAM is created.
# ##
# ## Example:
# ##   [[outputs.syslog]]
# ##     sdparam_separator = "_"
# ##     default_sdid = "default@32473"
# ##     sdids = ["foo@123", "bar@456"]
# ##
# ##   input  => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
# ##   output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
#
# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
# # sdparam_separator = "_"
#
# ## Default sdid used for tags/fields that don't contain a prefix defined in
# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
# ## will be used for unrecognised fields.
# # default_sdid = "default@32473"
#
# ## List of explicit prefixes to extract from tag/field keys and use as the
# ## SDID, if they match (see above example for more details):
# # sdids = ["foo@123", "bar@456"]
#
# ## Default severity value. Severity and Facility are used to calculate the
# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
# ## with key "severity_code" is defined. If unset, 5 (notice) is the default.
# # default_severity_code = 5
#
# ## Default facility value. Facility and Severity are used to calculate the
# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
# ## key "facility_code" is defined. If unset, 1 (user-level) is the default.
# # default_facility_code = 1
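#
# ## Worked example (per RFC5424#section-6.2.1): PRI = facility * 8 + severity,
# ## so the defaults above (facility 1, severity 5) give PRI = 1*8 + 5 = 13.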
#
# ## Default APP-NAME value (RFC5424#section-6.2.5)
# ## Used when no metric tag with key "appname" is defined.
# ## If unset, "Telegraf" is the default.
# # default_appname = "Telegraf"
# # Configuration for the Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## URL for Wavefront Direct Ingestion, or HTTP URL of a Wavefront Proxy.
# ## If using a Wavefront Proxy, also specify the port. example: http://proxyserver:2878
# url = "https://metrics.wavefront.com"
#
# ## Authentication Token for Wavefront. Only required if using Direct Ingestion.
# #token = "DUMMY_TOKEN"
#
# ## DNS name of the wavefront proxy server. Do not use if url is specified.
# #host = "wavefront.example.com"
#
# ## Port that the Wavefront proxy server listens on. Do not use if url is specified.
# #port = 2878
#
# ## prefix for metrics keys
# #prefix = "my.specific.prefix."
#
# ## whether to use "value" for the name of simple fields. default is false
# #simple_fields = false
#
# ## character to use between metric and field name. default is . (dot)
# #metric_separator = "."
#
# ## Convert metric name paths to use the metric_separator character.
# ## When true, all _ (underscore) characters in the final metric name are converted. default is true
# #convert_paths = true
#
# ## Use strict rules to sanitize metric and tag names from invalid characters.
# ## When enabled, forward slash (/) and comma (,) will be accepted.
# #use_strict = false
#
# ## Use a regex to sanitize metric and tag names from invalid characters.
# ## Regex is more thorough, but significantly slower. default is false
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
# #source_override = ["hostname", "address", "agent_host", "node_host"]
#
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
# #convert_bool = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## deprecated in 1.9; use the enum processor plugin
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
# # red = 0.0
###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################
# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ##   <target-type> = [<tag-key>...]
# [processors.converter.tags]
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Fields to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ##   <target-type> = [<field-key>...]
# [processors.converter.fields]
# tag = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
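#
# ## Illustrative (hypothetical) example: listing a field under a target type
# ## re-parses it, e.g. integer = ["port"] would turn a string field
# ## port="8080" into the integer field port=8080i.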
# # Add a tag containing the metric timestamp, formatted as a date string.
# [[processors.date]]
# ## New tag to create
# tag_key = "month"
#
# ## Date format string; must be a representation of the Go "reference time",
# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
# date_format = "Jan"
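#
# ## e.g. with the settings above, a metric collected in March would gain the
# ## tag month=Mar: system uptime=1337i => system,month=Mar uptime=1337i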
# # Map enum values according to a given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
# ## Name of the field to map
# field = "status"
#
# ## Name of the tag to map
# # tag = "status"
#
# ## Destination tag or field to be used for the mapped value. By default the
# ## source tag or field is used, overwriting the original value.
# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset, the unmodified value for the field will be used if no
# ## match is found.
# # default = 0
#
# ## Table of mappings
# [processors.enum.mapping.value_mappings]
# green = 1
# amber = 2
# red = 3
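#
# ## e.g. with the mapping above, a metric whose "status" field is "green" is
# ## emitted with an added field status_code=1i.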
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.override.tags]
# # additional_tag = "tag_value"
# # Parse a value in a specified field/tag(s) and add the result as a new metric
# [[processors.parser]]
# ## The name of the fields whose value will be parsed.
# parse_fields = []
#
# ## If true, incoming metrics are not emitted.
# drop_original = false
#
# ## If set to override, emitted metrics will be merged by overriding the
# ## original metric using the newly parsed metrics.
# merge = "override"
#
# ## The dataformat to be read from files
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Rotate a single valued metric into a multi field metric
# [[processors.pivot]]
# ## Tag to use for naming the new field.
# tag_key = "name"
# ## Field to use as the value of the new field.
# value_key = "value"
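#
# ## e.g. with the keys above: cpu,name=time_idle value=42 => cpu time_idle=42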
# # Print all metrics that pass through this filter.
# [[processors.printer]]
# # Transform tag and field values with a regex pattern
# [[processors.regex]]
# ## Tag and field conversions are defined in separate sub-tables
# # [[processors.regex.tags]]
# # ## Tag to change
# # key = "resp_code"
# # ## Regular expression to match on a tag value
# # pattern = "^(\\d)\\d\\d$"
# # ## Matches of the pattern will be replaced with this string. Use ${1}
# # ## notation to use the text of the first submatch.
# # replacement = "${1}xx"
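# # ## e.g. with the rule above, the tag value resp_code=200 becomes resp_code=2xx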
#
# # [[processors.regex.fields]]
# # ## Field to change
# # key = "request"
# # ## All the power of Go regular expressions is available here.
# # ## For example, named subgroups:
# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# # replacement = "${method}"
# # ## If result_key is present, a new field will be created
# # ## instead of changing the existing field
# # result_key = "method"
#
# ## Multiple conversions may be applied to one field sequentially.
# ## Let's extract one more value:
# # [[processors.regex.fields]]
# # key = "request"
# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
# # result_key = "search_category"
# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
# ## Convert a tag value to uppercase
# # [[processors.strings.uppercase]]
# # tag = "method"
#
# ## Convert a field value to lowercase and store in a new field
# # [[processors.strings.lowercase]]
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
# ## Trim leading and trailing whitespace using the default cutset
# # [[processors.strings.trim]]
# # field = "message"
#
# ## Trim leading characters in cutset
# # [[processors.strings.trim_left]]
# # field = "message"
# # cutset = "\t"
#
# ## Trim trailing characters in cutset
# # [[processors.strings.trim_right]]
# # field = "message"
# # cutset = "\r\n"
#
# ## Trim the given prefix from the field
# # [[processors.strings.trim_prefix]]
# # field = "my_value"
# # prefix = "my_"
#
# ## Trim the given suffix from the field
# # [[processors.strings.trim_suffix]]
# # field = "read_count"
# # suffix = "_count"
#
# ## Replace all non-overlapping instances of old with new
# # [[processors.strings.replace]]
# # measurement = "*"
# # old = ":"
# # new = "_"
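# # ## e.g. with the rule above, a measurement named "cpu:usage" becomes "cpu_usage"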
#
# ## Trims strings based on width
# # [[processors.strings.left]]
# # field = "message"
# # width = 10
# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
# [[processors.tag_limit]]
# ## Maximum number of tags to preserve
# limit = 10
#
# ## List of tags to preferentially preserve
# keep = ["foo", "bar", "baz"]
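#
# ## e.g. with the settings above, a metric carrying 12 tags is trimmed to 10,
# ## and foo, bar, and baz are kept in preference to the others.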
# # Print all metrics that pass through this filter. | |
# [[processors.topk]] | |
# ## How many seconds between aggregations | |
# # period = 10 | |
# | |
# ## How many top metrics to return | |
# # k = 10 | |
# | |
# ## Over which tags should the aggregation be done. Globs can be specified, in | |
# ## which case any tag matching the glob will aggregated over. If set to an | |
# ## empty list is no aggregation over tags is done | |
# # group_by = ['*'] | |
# | |
# ## Over which fields the top k are calculated
# # fields = ["value"] | |
# | |
# ## What aggregation to use. Options: sum, mean, min, max | |
# # aggregation = "mean" | |
# | |
# ## Instead of the top k largest metrics, return the bottom k lowest metrics | |
# # bottomk = false | |
# | |
# ## The plugin assigns each metric a GroupBy tag generated from its name and | |
# ## tags. If this setting is different from "", the plugin will add a
# ## tag (whose name will be the value of this setting) to each metric with
# ## the value of the calculated GroupBy tag. Useful for debugging.
# # add_groupby_tag = "" | |
# | |
# ## These settings provide a way to know the position of each metric in | |
# ## the top k. The 'add_rank_fields' setting lets you specify for which
# ## fields the position is required. If the list is non-empty, then a field
# ## will be added to each and every metric for each string present in this
# ## setting. This field will contain the ranking of the group that
# ## the metric belonged to when aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_rank'.
# # add_rank_fields = [] | |
# | |
# ## These settings provide a way to know what values the plugin is generating | |
# ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
# ## specify for which fields the final aggregation value is required. If the
# ## list is non-empty, then a field will be added to each and every metric for
# ## each field present in this setting. This field will contain
# ## the computed aggregation for the group that the metric belonged to when
# ## aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_aggregate'.
# # add_aggregate_fields = [] | |
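# ## A minimal sketch under assumed names (not defaults): keep the 3 series
# ## with the highest mean of an assumed 'cpu_usage' field, grouped by pid,
# ## and record each survivor's position in a 'cpu_usage_topk_rank' field:
# # period = 20
# # k = 3
# # group_by = ["pid"]
# # fields = ["cpu_usage"]
# # aggregation = "mean"
# # add_rank_fields = ["cpu_usage"]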
# # Rotate a multi-field metric into several single-field metrics
# [[processors.unpivot]] | |
# ## Tag to use for the name. | |
# tag_key = "name" | |
# ## Field to use for the name of the value. | |
# value_key = "value" | |
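# ## For illustration (a hypothetical metric): with the settings above, an
# ## input like
# ##   cpu,host=web01 user=10,system=5
# ## is rotated into one single-field metric per original field:
# ##   cpu,host=web01,name=user value=10
# ##   cpu,host=web01,name=system value=5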
############################################################################### | |
# AGGREGATOR PLUGINS # | |
############################################################################### | |
# # Keep the aggregate basicstats of each metric passing through. | |
# [[aggregators.basicstats]] | |
# ## The period on which to flush & clear the aggregator. | |
# period = "30s" | |
# ## If true, the original metric will be dropped by the | |
# ## aggregator and will not get sent to the output plugins. | |
# drop_original = false | |
# | |
# ## Configures which basic stats to push as fields | |
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] | |
# # Report the final metric of a series | |
# [[aggregators.final]] | |
# ## The period on which to flush & clear the aggregator. | |
# period = "30s" | |
# ## If true, the original metric will be dropped by the | |
# ## aggregator and will not get sent to the output plugins. | |
# drop_original = false | |
# | |
# ## How long a series must go without updates before it is considered final.
# series_timeout = "5m" | |
# # Create aggregate histograms. | |
# [[aggregators.histogram]] | |
# ## The period in which to flush the aggregator. | |
# period = "30s" | |
# | |
# ## If true, the original metric will be dropped by the | |
# ## aggregator and will not get sent to the output plugins. | |
# drop_original = false | |
# | |
# ## If true, the histogram will be reset on flush instead | |
# ## of accumulating the results. | |
# reset = false | |
# | |
# ## Example config that aggregates all fields of the metric. | |
# # [[aggregators.histogram.config]] | |
# # ## The set of buckets. | |
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] | |
# # ## The name of the metric.
# # measurement_name = "cpu" | |
# | |
# ## Example config that aggregates only specific fields of the metric. | |
# # [[aggregators.histogram.config]] | |
# # ## The set of buckets. | |
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] | |
# # ## The name of the metric.
# # measurement_name = "diskio"
# # ## The specific fields of the metric
# # fields = ["io_time", "read_time", "write_time"] | |
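# ## Note: in this aggregator the buckets are cumulative (a value is counted
# ## in every bucket whose upper bound is greater than or equal to it), and
# ## each count is emitted as a '<field>_bucket' field tagged with the bucket
# ## bound. Check the plugin README to confirm the output shape on your version.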
# # Keep the aggregate min/max of each metric passing through. | |
# [[aggregators.minmax]] | |
# ## General Aggregator Arguments: | |
# ## The period on which to flush & clear the aggregator. | |
# period = "30s" | |
# ## If true, the original metric will be dropped by the | |
# ## aggregator and will not get sent to the output plugins. | |
# drop_original = false | |
# # Count the occurrence of values in fields. | |
# [[aggregators.valuecounter]] | |
# ## General Aggregator Arguments: | |
# ## The period on which to flush & clear the aggregator. | |
# period = "30s" | |
# ## If true, the original metric will be dropped by the | |
# ## aggregator and will not get sent to the output plugins. | |
# drop_original = false | |
# ## The fields for which the values will be counted | |
# fields = [] | |
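# ## A minimal sketch (the 'status' field is an assumption): with
# ## fields = ["status"], a period that saw status=200 twice and status=500
# ## once would emit counter fields like status_200=2i and status_500=1i.
# ## Prefer fields with few distinct values, since every distinct value
# ## becomes its own field.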
############################################################################### | |
# INPUT PLUGINS # | |
############################################################################### | |
# Read metrics about cpu usage | |
[[inputs.cpu]] | |
## Whether to report per-cpu stats or not | |
percpu = true | |
## Whether to report total system cpu stats or not | |
totalcpu = true | |
## If true, collect raw CPU time metrics. | |
collect_cpu_time = false | |
## If true, compute and report the sum of all non-idle CPU states. | |
report_active = false | |
# Read metrics about disk usage by mount point | |
[[inputs.disk]] | |
## By default stats will be gathered for all mount points. | |
## Setting mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"] | |
## Ignore mount points by filesystem type. | |
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] | |
# Read metrics about disk IO by device | |
[[inputs.diskio]] | |
## By default, telegraf will gather stats for all devices including | |
## disk partitions. | |
## Setting devices will restrict the stats to the specified devices. | |
# devices = ["sda", "sdb", "vd*"] | |
## Uncomment the following line if you need disk serial numbers. | |
# skip_serial_number = false | |
# | |
## On systems which support it, device metadata can be added in the form of | |
## tags. | |
## Currently only Linux is supported via udev properties. You can view | |
## available properties for a device by running: | |
## 'udevadm info -q property -n /dev/sda' | |
## Note: Most, but not all, udev properties can be accessed this way. Properties | |
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. | |
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] | |
# | |
## Using the same metadata source as device_tags, you can also customize the | |
## name of the device via templates. | |
## The 'name_templates' parameter is a list of templates to try and apply to | |
## the device. The template may contain variables in the form of '$PROPERTY' or | |
## '${PROPERTY}'. The first template which does not contain any variables not | |
## present for the device is used as the device name tag. | |
## The typical use case is for LVM volumes, to get the VG/LV name instead of | |
## the near-meaningless DM-0 name. | |
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] | |
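## For example, with the templates above, a device exposing DM_VG_NAME="vg0"
## and DM_LV_NAME="home" but no ID_FS_LABEL would be named "vg0/home": the
## first template is skipped because it references a property the device
## does not have.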
# Get kernel statistics from /proc/stat | |
[[inputs.kernel]] | |
# no configuration | |
# Read metrics about memory usage | |
[[inputs.mem]] | |
# no configuration | |
# Get the number of processes and group them by status | |
[[inputs.processes]] | |
# no configuration | |
# Read metrics about swap memory usage | |
[[inputs.swap]] | |
# no configuration | |
# Read metrics about system load & uptime | |
[[inputs.system]] | |
## Uncomment to remove deprecated metrics. | |
# fielddrop = ["uptime_format"] | |
# # Gather ActiveMQ metrics | |
# [[inputs.activemq]] | |
# ## ActiveMQ WebConsole URL | |
# url = "http://127.0.0.1:8161" | |
# | |
# ## Required ActiveMQ Endpoint | |
# ## deprecated in 1.11; use the url option | |
# # server = "127.0.0.1" | |
# # port = 8161 | |
# | |
# ## Credentials for basic HTTP authentication | |
# # username = "admin" | |
# # password = "admin" | |
# | |
# ## Required ActiveMQ webadmin root path | |
# # webadmin = "admin" | |
# | |
# ## Maximum time to receive response. | |
# # response_timeout = "5s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read stats from aerospike server(s) | |
# [[inputs.aerospike]] | |
# ## Aerospike servers to connect to (with port) | |
# ## This plugin will query all namespaces the aerospike | |
# ## server has configured and get stats for them. | |
# servers = ["localhost:3000"] | |
# | |
# # username = "telegraf" | |
# # password = "pa$$word" | |
# | |
# ## Optional TLS Config | |
# # enable_tls = false | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## If false, skip chain & host verification | |
# # insecure_skip_verify = true | |
# # Read Apache status information (mod_status) | |
# [[inputs.apache]] | |
# ## An array of URLs to gather from, must be directed at the machine | |
# ## readable version of the mod_status page including the auto query string. | |
# ## Default is "http://localhost/server-status?auto". | |
# urls = ["http://localhost/server-status?auto"] | |
# | |
# ## Credentials for basic HTTP authentication. | |
# # username = "myuser" | |
# # password = "mypassword" | |
# | |
# ## Maximum time to receive response. | |
# # response_timeout = "5s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Monitor APC UPSes connected to apcupsd | |
# [[inputs.apcupsd]] | |
# # A list of running apcupsd servers to connect to.
# # If not provided will default to tcp://127.0.0.1:3551 | |
# servers = ["tcp://127.0.0.1:3551"] | |
# | |
# ## Timeout for dialing server. | |
# timeout = "5s" | |
# # Gather metrics from Apache Aurora schedulers | |
# [[inputs.aurora]] | |
# ## Schedulers are the base addresses of your Aurora Schedulers | |
# schedulers = ["http://127.0.0.1:8081"] | |
# | |
# ## Set of role types to collect metrics from. | |
# ## | |
# ## The scheduler roles are checked each interval by contacting the | |
# ## scheduler nodes; zookeeper is not contacted. | |
# # roles = ["leader", "follower"] | |
# | |
# ## Timeout is the max time for total network operations. | |
# # timeout = "5s" | |
# | |
# ## Username and password are sent using HTTP Basic Auth. | |
# # username = "username" | |
# # password = "pa$$word" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read metrics of bcache from stats_total and dirty_data | |
# [[inputs.bcache]] | |
# ## Bcache sets path | |
# ## If not specified, then default is: | |
# bcachePath = "/sys/fs/bcache" | |
# | |
# ## By default, telegraf gathers stats for all bcache devices.
# ## Setting devices will restrict the stats to the specified | |
# ## bcache devices. | |
# bcacheDevs = ["bcache0"] | |
# # Collects Beanstalkd server and tubes stats | |
# [[inputs.beanstalkd]] | |
# ## Server to collect data from | |
# server = "localhost:11300" | |
# | |
# ## List of tubes to gather stats about. | |
# ## If no tubes are specified, data is gathered for each tube on the server reported by the list-tubes command
# tubes = ["notifications"] | |
# # Read BIND nameserver XML statistics | |
# [[inputs.bind]] | |
# ## An array of BIND XML statistics URI to gather stats. | |
# ## Default is "http://localhost:8053/xml/v3". | |
# # urls = ["http://localhost:8053/xml/v3"] | |
# # gather_memory_contexts = false | |
# # gather_views = false | |
# # Collect bond interface status, slaves statuses and failures count | |
# [[inputs.bond]] | |
# ## Sets 'proc' directory path | |
# ## If not specified, then default is /proc | |
# # host_proc = "/proc" | |
# | |
# ## By default, telegraf gathers stats for all bond interfaces.
# ## Setting interfaces will restrict the stats to the specified | |
# ## bond interfaces. | |
# # bond_interfaces = ["bond0"] | |
# # Collect Kafka topics and consumers status from Burrow HTTP API. | |
# [[inputs.burrow]] | |
# ## Burrow API endpoints in format "schema://host:port". | |
# ## Default is "http://localhost:8000". | |
# servers = ["http://localhost:8000"] | |
# | |
# ## Override Burrow API prefix. | |
# ## Useful when Burrow is behind reverse-proxy. | |
# # api_prefix = "/v3/kafka" | |
# | |
# ## Maximum time to receive response. | |
# # response_timeout = "5s" | |
# | |
# ## Limit per-server concurrent connections. | |
# ## Useful in case of large number of topics or consumer groups. | |
# # concurrent_connections = 20 | |
# | |
# ## Filter clusters, default is no filtering. | |
# ## Values can be specified as glob patterns. | |
# # clusters_include = [] | |
# # clusters_exclude = [] | |
# | |
# ## Filter consumer groups, default is no filtering. | |
# ## Values can be specified as glob patterns. | |
# # groups_include = [] | |
# # groups_exclude = [] | |
# | |
# ## Filter topics, default is no filtering. | |
# ## Values can be specified as glob patterns. | |
# # topics_include = [] | |
# # topics_exclude = [] | |
# | |
# ## Credentials for basic HTTP authentication. | |
# # username = "" | |
# # password = "" | |
# | |
# ## Optional SSL config | |
# # ssl_ca = "/etc/telegraf/ca.pem" | |
# # ssl_cert = "/etc/telegraf/cert.pem" | |
# # ssl_key = "/etc/telegraf/key.pem" | |
# # insecure_skip_verify = false | |
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. | |
# [[inputs.ceph]] | |
# ## This is the recommended interval to poll. Too frequent and you will lose | |
# ## data points due to timeouts during rebalancing and recovery | |
# interval = '1m' | |
# | |
# ## All configuration values are optional, defaults are shown below | |
# | |
# ## location of ceph binary | |
# ceph_binary = "/usr/bin/ceph" | |
# | |
# ## directory in which to look for socket files | |
# socket_dir = "/var/run/ceph" | |
# | |
# ## prefix of MON and OSD socket files, used to determine socket type | |
# mon_prefix = "ceph-mon" | |
# osd_prefix = "ceph-osd" | |
# | |
# ## suffix used to identify socket files | |
# socket_suffix = "asok" | |
# | |
# ## Ceph user to authenticate as | |
# ceph_user = "client.admin" | |
# | |
# ## Ceph configuration to use to locate the cluster | |
# ceph_config = "/etc/ceph/ceph.conf" | |
# | |
# ## Whether to gather statistics via the admin socket | |
# gather_admin_socket_stats = true | |
# | |
# ## Whether to gather statistics via ceph commands | |
# gather_cluster_stats = false | |
# # Read specific statistics per cgroup | |
# [[inputs.cgroup]] | |
# ## Directories in which to look for files, globs are supported. | |
# ## Consider restricting paths to the set of cgroups you really | |
# ## want to monitor if you have a large number of cgroups, to avoid | |
# ## any cardinality issues. | |
# # paths = [ | |
# # "/cgroup/memory", | |
# # "/cgroup/memory/child1", | |
# # "/cgroup/memory/child2/*", | |
# # ] | |
# ## cgroup stat fields, as file names, globs are supported. | |
# ## these file names are appended to each path from above. | |
# # files = ["memory.*usage*", "memory.limit_in_bytes"] | |
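# ## A minimal sketch for cgroup v1 memory accounting (the paths below are
# ## assumptions for a typical Docker host; adjust to your cgroup mount):
# # paths = ["/sys/fs/cgroup/memory/docker/*"]
# # files = ["memory.usage_in_bytes", "memory.limit_in_bytes"]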
# # Get standard chrony metrics, requires chronyc executable. | |
# [[inputs.chrony]] | |
# ## If true, chronyc tries to perform a DNS lookup for the time server. | |
# # dns_lookup = false | |
# # Pull Metric Statistics from Amazon CloudWatch | |
# [[inputs.cloudwatch]] | |
# ## Amazon Region | |
# region = "us-east-1" | |
# | |
# ## Amazon Credentials | |
# ## Credentials are loaded in the following order | |
# ## 1) Assumed credentials via STS if role_arn is specified | |
# ## 2) explicit credentials from 'access_key' and 'secret_key' | |
# ## 3) shared profile from 'profile' | |
# ## 4) environment variables | |
# ## 5) shared credentials file | |
# ## 6) EC2 Instance Profile | |
# # access_key = "" | |
# # secret_key = "" | |
# # token = "" | |
# # role_arn = "" | |
# # profile = "" | |
# # shared_credential_file = "" | |
# | |
# ## Endpoint to make request against, the correct endpoint is automatically | |
# ## determined and this option should only be set if you wish to override the | |
# ## default. | |
# ## ex: endpoint_url = "http://localhost:8000" | |
# # endpoint_url = "" | |
# | |
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all | |
# # metrics are made available to the 1 minute period. Some are collected at | |
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. | |
# # Note that if a period is configured that is smaller than the minimum for a | |
# # particular metric, that metric will not be returned by the Cloudwatch API | |
# # and will not be collected by Telegraf. | |
# # | |
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) | |
# period = "5m" | |
# | |
# ## Collection Delay (required - must account for metrics availability via CloudWatch API) | |
# delay = "5m" | |
# | |
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid | |
# ## gaps or overlap in pulled data | |
# interval = "5m" | |
# | |
# ## Configure the TTL for the internal cache of metrics. | |
# # cache_ttl = "1h" | |
# | |
# ## Metric Statistic Namespace (required) | |
# namespace = "AWS/ELB" | |
# | |
# ## Maximum requests per second. Note that the global default AWS rate limit is | |
# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a | |
# ## maximum of 50. | |
# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html | |
# # ratelimit = 25 | |
# | |
# ## Namespace-wide statistic filters. These allow fewer queries to be made to | |
# ## cloudwatch. | |
# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# # statistic_exclude = [] | |
# | |
# ## Metrics to Pull | |
# ## Defaults to all Metrics in Namespace if nothing is provided | |
# ## Refreshes Namespace available metrics every 1h | |
# #[[inputs.cloudwatch.metrics]] | |
# # names = ["Latency", "RequestCount"] | |
# # | |
# # ## Statistic filters for Metric. These allow for retrieving specific | |
# # ## statistics for an individual metric. | |
# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# # # statistic_exclude = [] | |
# # | |
# # ## Dimension filters for Metric. All dimensions defined for the metric names | |
# # ## must be specified in order to retrieve the metric statistics. | |
# # [[inputs.cloudwatch.metrics.dimensions]] | |
# # name = "LoadBalancerName" | |
# # value = "p-example" | |
# # Collects conntrack stats from the configured directories and files. | |
# [[inputs.conntrack]] | |
# ## The following defaults would work with multiple versions of conntrack. | |
# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across | |
# ## kernel versions, as are the directory locations. | |
# | |
# ## Superset of filenames to look for within the conntrack dirs. | |
# ## Missing files will be ignored. | |
# files = ["ip_conntrack_count","ip_conntrack_max", | |
# "nf_conntrack_count","nf_conntrack_max"] | |
# | |
# ## Directories to search within for the conntrack files above. | |
# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] | |
# # Gather health check statuses from services registered in Consul | |
# [[inputs.consul]] | |
# ## Consul server address | |
# # address = "localhost" | |
# | |
# ## URI scheme for the Consul server, one of "http", "https" | |
# # scheme = "http" | |
# | |
# ## ACL token used in every request | |
# # token = "" | |
# | |
# ## HTTP Basic Authentication username and password. | |
# # username = "" | |
# # password = "" | |
# | |
# ## Data center to query the health checks from | |
# # datacenter = "" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = true | |
# | |
# ## Consul checks' tag splitting | |
# # When tags are formatted like "key:value" with ":" as a delimiter then | |
# # they will be split and reported as proper key:value pairs in Telegraf
# # tag_delimiter = ":" | |
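# # For example, with tag_delimiter = ":" a Consul check tagged
# # "env:production" is reported with the Telegraf tag env=production.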
# # Read metrics from one or many couchbase clusters | |
# [[inputs.couchbase]] | |
# ## specify servers via a url matching: | |
# ## [protocol://][username:password@]address[:port]
# ## e.g. | |
# ## http://couchbase-0.example.com/ | |
# ## http://admin:secret@couchbase-0.example.com:8091/ | |
# ## | |
# ## If no servers are specified, then localhost is used as the host. | |
# ## If no protocol is specified, HTTP is used. | |
# ## If no port is specified, 8091 is used. | |
# servers = ["http://localhost:8091"] | |
# # Read CouchDB Stats from one or more servers | |
# [[inputs.couchdb]] | |
# ## Works with CouchDB stats endpoints out of the box | |
# ## Multiple Hosts from which to read CouchDB stats: | |
# hosts = ["http://localhost:5984/_stats"]
# | |
# ## Use HTTP Basic Authentication. | |
# # basic_username = "telegraf" | |
# # basic_password = "p@ssw0rd" | |
# # Input plugin for DC/OS metrics | |
# [[inputs.dcos]] | |
# ## The DC/OS cluster URL. | |
# cluster_url = "https://dcos-ee-master-1" | |
# | |
# ## The ID of the service account. | |
# service_account_id = "telegraf" | |
# ## The private key file for the service account. | |
# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" | |
# | |
# ## Path containing login token. If set, will read on every gather. | |
# # token_file = "/home/dcos/.dcos/token" | |
# | |
# ## In all filter options if both include and exclude are empty all items | |
# ## will be collected. Arrays may contain glob patterns. | |
# ## | |
# ## Node IDs to collect metrics from. If a node is excluded, no metrics will | |
# ## be collected for its containers or apps. | |
# # node_include = [] | |
# # node_exclude = [] | |
# ## Container IDs to collect container metrics from. | |
# # container_include = [] | |
# # container_exclude = [] | |
# ## Container IDs to collect app metrics from. | |
# # app_include = [] | |
# # app_exclude = [] | |
# | |
# ## Maximum concurrent connections to the cluster. | |
# # max_connections = 10 | |
# ## Maximum time to receive a response from cluster. | |
# # response_timeout = "20s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## If false, skip chain & host verification | |
# # insecure_skip_verify = true | |
# | |
# ## Recommended filtering to reduce series cardinality. | |
# # [inputs.dcos.tagdrop] | |
# # path = ["/var/lib/mesos/slave/slaves/*"] | |
# # Read metrics from one or many disque servers | |
# [[inputs.disque]] | |
# ## An array of URIs to gather stats about. Specify an ip or hostname
# ## with optional port and password. | |
# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. | |
# ## If no servers are specified, then localhost is used as the host. | |
# servers = ["localhost"] | |
# # Provide a native collection for dmsetup based statistics for dm-cache | |
# [[inputs.dmcache]] | |
# ## Whether to report per-device stats or not | |
# per_device = true | |
# # Query a given DNS server and gather statistics
# [[inputs.dns_query]] | |
# ## servers to query | |
# servers = ["8.8.8.8"] | |
# | |
# ## Network is the network protocol name. | |
# # network = "udp" | |
# | |
# ## Domains or subdomains to query. | |
# # domains = ["."] | |
# | |
# ## Query record type. | |
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# # record_type = "A" | |
# | |
# ## DNS server port.
# # port = 53 | |
# | |
# ## Query timeout in seconds. | |
# # timeout = 2 | |
# # Read metrics about docker containers | |
# [[inputs.docker]] | |
# ## Docker Endpoint | |
# ## To use TCP, set endpoint = "tcp://[ip]:[port]" | |
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" | |
# endpoint = "unix:///var/run/docker.sock" | |
# | |
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false | |
# | |
# ## Only collect metrics for these containers, collect all if empty | |
# container_names = [] | |
# | |
# ## Containers to include and exclude. Globs accepted. | |
# ## Note that an empty array for both will include all containers | |
# container_name_include = [] | |
# container_name_exclude = [] | |
# | |
# ## Container states to include and exclude. Globs accepted. | |
# ## When empty only containers in the "running" state will be captured. | |
# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] | |
# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] | |
# # container_state_include = [] | |
# # container_state_exclude = [] | |
# | |
# ## Timeout for docker list, info, and stats commands | |
# timeout = "5s" | |
# | |
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and | |
# ## network (eth0, eth1, ...) stats or not | |
# perdevice = true | |
# ## Whether to report for each container total blkio and network stats or not | |
# total = false | |
# ## Which environment variables should we use as a tag | |
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
# | |
# ## docker labels to include and exclude as tags. Globs accepted. | |
# ## Note that an empty array for both will include all labels as tags | |
# docker_label_include = [] | |
# docker_label_exclude = [] | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read statistics from one or many dovecot servers | |
# [[inputs.dovecot]] | |
# ## specify dovecot servers via an address:port list | |
# ## e.g. | |
# ## localhost:24242 | |
# ## | |
# ## If no servers are specified, then localhost is used as the host. | |
# servers = ["localhost:24242"] | |
# ## Type is one of "user", "domain", "ip", or "global" | |
# type = "global" | |
# ## Wildcard matches like "*.com". An empty string "" is the same as "*"
# ## If type = "ip" filters should be <IP/network> | |
# filters = [""] | |
# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints. | |
# [[inputs.ecs]] | |
# ## ECS metadata url | |
# # endpoint_url = "http://169.254.170.2" | |
# | |
# ## Containers to include and exclude. Globs accepted. | |
# ## Note that an empty array for both will include all containers | |
# # container_name_include = [] | |
# # container_name_exclude = [] | |
# | |
# ## Container states to include and exclude. Globs accepted. | |
# ## When empty only containers in the "RUNNING" state will be captured. | |
# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", | |
# ## "RESOURCES_PROVISIONED", "STOPPED". | |
# # container_status_include = [] | |
# # container_status_exclude = [] | |
# | |
# ## ecs labels to include and exclude as tags. Globs accepted. | |
# ## Note that an empty array for both will include all labels as tags | |
# ecs_label_include = [ "com.amazonaws.ecs.*" ] | |
# ecs_label_exclude = [] | |
# | |
# ## Timeout for queries. | |
# # timeout = "5s" | |
# # Read stats from one or more Elasticsearch servers or clusters | |
# [[inputs.elasticsearch]] | |
# ## specify a list of one or more Elasticsearch servers | |
# # you can add username and password to your url to use basic authentication: | |
# # servers = ["http://user:pass@localhost:9200"] | |
# servers = ["http://localhost:9200"] | |
# | |
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s" | |
# | |
# ## When local is true (the default), the node will read only its own stats. | |
# ## Set local to false when you want to read the node stats from all nodes | |
# ## of the cluster. | |
# local = true | |
# | |
# ## Set cluster_health to true when you want to also obtain cluster health stats | |
# cluster_health = false | |
# | |
# ## Adjust cluster_health_level when you want to also obtain detailed health stats | |
# ## The options are | |
# ## - indices (default) | |
# ## - cluster | |
# # cluster_health_level = "indices" | |
# | |
# ## Set cluster_stats to true when you want to also obtain cluster stats. | |
# cluster_stats = false | |
# | |
# ## Only gather cluster_stats from the master node. For this to work, 'local' must be set to true.
# cluster_stats_only_from_master = true | |
# | |
# ## Indices to collect; can be one or more index names or _all
# indices_include = ["_all"] | |
# | |
# ## One of "shards", "cluster", "indices" | |
# indices_level = "shards" | |
# | |
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options | |
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", | |
# ## "breaker". Per default, all stats are gathered. | |
# # node_stats = ["jvm", "http"] | |
# | |
# ## HTTP Basic Authentication username and password. | |
# # username = "" | |
# # password = "" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read metrics from one or more commands that can output to stdout | |
# [[inputs.exec]] | |
# ## Commands array | |
# commands = [ | |
# "/tmp/test.sh", | |
# "/usr/bin/mycollector --foo=bar", | |
# "/tmp/collect_*.sh" | |
# ] | |
# | |
# ## Timeout for each command to complete. | |
# timeout = "5s" | |
# | |
# ## measurement name suffix (for separating different commands) | |
# name_suffix = "_mycollector" | |
# | |
# ## Data format to consume. | |
# ## Each data format has its own unique set of configuration options, read | |
# ## more about them here: | |
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md | |
# data_format = "influx" | |
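# ## For example, a hypothetical script listed in 'commands' that prints
# ##   webshop_orders,region=eu pending=12i,failed=0i
# ## to stdout is parsed as one metric with two integer fields (any valid
# ## influx line protocol works; the names above are illustrative).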
# # Read metrics from fail2ban. | |
# [[inputs.fail2ban]] | |
# ## Use sudo to run fail2ban-client | |
# use_sudo = false | |
# # Read devices value(s) from a Fibaro controller | |
# [[inputs.fibaro]] | |
# ## Required Fibaro controller address/hostname. | |
# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available | |
# url = "http://<controller>:80" | |
# | |
# ## Required credentials to access the API (http://<controller/api/<component>) | |
# username = "<username>" | |
# password = "<password>" | |
# | |
# ## Amount of time allowed to complete the HTTP request | |
# # timeout = "5s" | |
# # Reload and gather from file[s] on telegraf's interval. | |
# [[inputs.file]] | |
# ## Files to parse each interval. | |
# ## These accept standard unix glob matching rules, but with the addition of | |
# ## ** as a "super asterisk". ie: | |
# ## /var/log/**.log -> recursively find all .log files in /var/log | |
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log | |
# ## /var/log/apache.log -> only read the apache log file | |
# files = ["/var/log/apache/access.log"] | |
# | |
# ## The dataformat to be read from files | |
# ## Each data format has its own unique set of configuration options, read | |
# ## more about them here: | |
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md | |
# data_format = "influx" | |
# # Count files in a directory | |
# [[inputs.filecount]] | |
# ## Directory to gather stats about. | |
# ## deprecated in 1.9; use the directories option | |
# # directory = "/var/cache/apt/archives" | |
# | |
# ## Directories to gather stats about. | |
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
# ## /var/log -> count all files in /var/log and all of its subdirectories | |
# directories = ["/var/cache/apt/archives"] | |
# | |
# ## Only count files that match the name pattern. Defaults to "*". | |
# name = "*.deb" | |
# | |
# ## Count files in subdirectories. Defaults to true. | |
# recursive = false | |
# | |
# ## Only count regular files. Defaults to true. | |
# regular_only = true | |
# | |
# ## Only count files that are at least this size. If size is | |
# ## a negative number, only count files that are smaller than the | |
# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... | |
# ## Without quotes and units, interpreted as size in bytes. | |
# size = "0B" | |
# | |
# ## Only count files that have not been touched for at least this | |
# ## duration. If mtime is negative, only count files that have been | |
# ## touched in this duration. Defaults to "0s". | |
# mtime = "0s" | |
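# ## For example, size = "-10MiB" would count only files smaller than 10 MiB,
# ## and mtime = "-5m" would count only files touched in the last 5 minutes.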
# # Read stats about given file(s) | |
# [[inputs.filestat]] | |
# ## Files to gather stats about. | |
# ## These accept standard unix glob matching rules, but with the addition of | |
# ## ** as a "super asterisk". ie: | |
# ## "/var/log/**.log" -> recursively find all .log files in /var/log | |
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log | |
# ## "/var/log/apache.log" -> just tail the apache log file | |
# ## | |
# ## See https://github.com/gobwas/glob for more examples | |
# ## | |
# files = ["/var/log/**.log"] | |
# ## If true, read the entire file and calculate an md5 checksum. | |
# md5 = false | |
# # Read real time temps from fireboard.io servers | |
# [[inputs.fireboard]] | |
# ## Specify auth token for your account | |
# auth_token = "invalidAuthToken" | |
# ## You can override the fireboard server URL if necessary | |
# # url = "https://fireboard.io/api/v1/devices.json"
# ## You can set a different http_timeout if you need to | |
# ## You should set a string using a number and a time indicator,
# ## for example "12s" for 12 seconds. | |
# # http_timeout = "4s" | |
# # Read metrics exposed by fluentd in_monitor plugin | |
# [[inputs.fluentd]] | |
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). | |
# ## | |
# ## Endpoint: | |
# ## - only one URI is allowed | |
# ## - https is not supported | |
# endpoint = "http://localhost:24220/api/plugins.json" | |
# | |
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) | |
# exclude = [ | |
# "monitor_agent", | |
# "dummy", | |
# ] | |
# # Gather repository information from GitHub hosted repositories. | |
# [[inputs.github]] | |
# ## List of repositories to monitor. | |
# repositories = [ | |
# "influxdata/telegraf", | |
# "influxdata/influxdb" | |
# ] | |
# | |
# ## Github API access token. Unauthenticated requests are limited to 60 per hour. | |
# # access_token = "" | |
# | |
# ## Github API enterprise url. Github Enterprise accounts must specify their base url. | |
# # enterprise_base_url = "" | |
# | |
# ## Timeout for HTTP requests. | |
# # http_timeout = "5s" | |
# # Read flattened metrics from one or more GrayLog HTTP endpoints | |
# [[inputs.graylog]] | |
# ## API endpoint, currently supported API: | |
# ## | |
# ## - multiple (Ex http://<host>:12900/system/metrics/multiple) | |
# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace}) | |
# ## | |
# ## For namespace endpoint, the metrics array will be ignored for that call. | |
# ## Endpoint can contain namespace and multiple type calls. | |
# ## | |
# ## Please check http://[graylog-server-ip]:12900/api-browser for full list | |
# ## of endpoints | |
# servers = [ | |
# "http://[graylog-server-ip]:12900/system/metrics/multiple", | |
# ] | |
# | |
# ## Metrics list | |
# ## List of metrics can be found on Graylog webservice documentation. | |
# ## Or by hitting the web service api at:
# ## http://[graylog-host]:12900/system/metrics | |
# metrics = [ | |
# "jvm.cl.loaded", | |
# "jvm.memory.pools.Metaspace.committed" | |
# ] | |
# | |
# ## Username and password | |
# username = "" | |
# password = "" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read metrics of haproxy, via socket or csv stats page | |
# [[inputs.haproxy]] | |
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc. | |
# ## Make sure you specify the complete path to the stats endpoint | |
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats | |
# | |
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats | |
# servers = ["http://myhaproxy.com:1936/haproxy?stats"] | |
# | |
# ## Credentials for basic HTTP authentication | |
# # username = "admin" | |
# # password = "admin" | |
# | |
# ## You can also use local socket with standard wildcard globbing. | |
# ## A server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid. | |
# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] | |
# | |
# ## By default, some of the fields are renamed from what haproxy calls them. | |
# ## Setting this option to true results in the plugin keeping the original | |
# ## field names. | |
# # keep_field_names = false | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Monitor disks' temperatures using hddtemp | |
# [[inputs.hddtemp]] | |
# ## By default, telegraf gathers temperature data from all disks detected
# ## by hddtemp.
# ## | |
# ## Only collect temps from the selected disks. | |
# ## | |
# ## A * as the device name will return the temperature values of all disks. | |
# ## | |
# # address = "127.0.0.1:7634" | |
# # devices = ["sda", "*"] | |
# # Read formatted metrics from one or more HTTP endpoints | |
# [[inputs.http]] | |
# ## One or more URLs from which to read formatted metrics | |
# urls = [ | |
# "http://localhost/metrics" | |
# ] | |
# | |
# ## HTTP method | |
# # method = "GET" | |
# | |
# ## Optional HTTP headers | |
# # headers = {"X-Special-Header" = "Special-Value"} | |
# | |
# ## Optional HTTP Basic Auth Credentials | |
# # username = "username" | |
# # password = "pa$$word" | |
# | |
# ## HTTP entity-body to send with POST/PUT requests. | |
# # body = "" | |
# | |
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to | |
# ## compress body or "identity" to apply no encoding. | |
# # content_encoding = "identity" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# | |
# ## Amount of time allowed to complete the HTTP request | |
# # timeout = "5s" | |
# | |
# ## Data format to consume. | |
# ## Each data format has its own unique set of configuration options, read | |
# ## more about them here: | |
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md | |
# # data_format = "influx" | |
# # HTTP/HTTPS request given an address a method and a timeout | |
# [[inputs.http_response]] | |
# ## Deprecated in 1.12, use 'urls' | |
# ## Server address (default http://localhost) | |
# # address = "http://localhost" | |
# | |
# ## List of urls to query. | |
# # urls = ["http://localhost"] | |
# | |
# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
# # http_proxy = "http://localhost:8888" | |
# | |
# ## Set response_timeout (default 5 seconds) | |
# # response_timeout = "5s" | |
# | |
# ## HTTP Request Method | |
# # method = "GET" | |
# | |
# ## Whether to follow redirects from the server (defaults to false) | |
# # follow_redirects = false | |
# | |
# ## Optional HTTP Request Body | |
# # body = ''' | |
# # {'fake':'data'} | |
# # ''' | |
# | |
# ## Optional substring or regex match in body of the response | |
# # response_string_match = "\"service_status\": \"up\"" | |
# # response_string_match = "ok" | |
# # response_string_match = "\".*_status\".?:.?\"up\"" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# | |
# ## HTTP Request Headers (all values must be strings) | |
# # [inputs.http_response.headers] | |
# # Host = "github.com" | |
# | |
# ## Interface to use when dialing an address | |
# # interface = "eth0" | |
# # Read flattened metrics from one or more JSON HTTP endpoints | |
# [[inputs.httpjson]] | |
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
# | |
# ## Name for the service being polled. Will be appended to the name of the | |
# ## measurement e.g. httpjson_webserver_stats | |
# ## | |
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. | |
# name = "webserver_stats" | |
# | |
# ## URL of each server in the service's cluster | |
# servers = [ | |
# "http://localhost:9999/stats/", | |
# "http://localhost:9998/stats/", | |
# ] | |
# ## Set response_timeout (default 5 seconds) | |
# response_timeout = "5s" | |
# | |
# ## HTTP method to use: GET or POST (case-sensitive) | |
# method = "GET" | |
# | |
# ## List of tag names to extract from top-level of JSON server response | |
# # tag_keys = [ | |
# # "my_tag_1", | |
# # "my_tag_2" | |
# # ] | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# | |
# ## HTTP parameters (all values must be strings). For "GET" requests, data | |
# ## will be included in the query. For "POST" requests, data will be included | |
# ## in the request body as "x-www-form-urlencoded". | |
# # [inputs.httpjson.parameters] | |
# # event_type = "cpu_spike" | |
# # threshold = "0.75" | |
# | |
# ## HTTP Headers (all values must be strings) | |
# # [inputs.httpjson.headers] | |
# # X-Auth-Token = "my-xauth-token" | |
# # apiVersion = "v1" | |
# # Gather Icinga2 status | |
# [[inputs.icinga2]] | |
# ## Required Icinga2 server address (default: "https://localhost:5665") | |
# # server = "https://localhost:5665" | |
# | |
# ## Required Icinga2 object type ("services" or "hosts", default "services")
# # object_type = "services" | |
# | |
# ## Credentials for basic HTTP authentication | |
# # username = "admin" | |
# # password = "admin" | |
# | |
# ## Maximum time to receive response. | |
# # response_timeout = "5s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = true | |
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints | |
# [[inputs.influxdb]] | |
# ## Works with InfluxDB debug endpoints out of the box, | |
# ## but other services can use this format too. | |
# ## See the influxdb plugin's README for more details. | |
# | |
# ## Multiple URLs from which to read InfluxDB-formatted JSON | |
# ## Default is "http://localhost:8086/debug/vars". | |
# urls = [ | |
# "http://localhost:8086/debug/vars" | |
# ] | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# | |
# ## http request & header timeout | |
# timeout = "5s" | |
# # Collect statistics about itself | |
# [[inputs.internal]] | |
# ## If true, collect telegraf memory stats. | |
# # collect_memstats = true | |
# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. | |
# [[inputs.interrupts]] | |
# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is | |
# ## stored as a field. | |
# ## | |
# ## The default is false for backwards compatibility, and will be changed to | |
# ## true in a future version. It is recommended to set it to true on new
# ## deployments. | |
# # cpu_as_tag = false | |
# | |
# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. | |
# # [inputs.interrupts.tagdrop] | |
# # irq = [ "NET_RX", "TASKLET" ] | |
# # Read metrics from the bare metal servers via IPMI | |
# [[inputs.ipmi_sensor]] | |
# ## optionally specify the path to the ipmitool executable | |
# # path = "/usr/bin/ipmitool" | |
# ## | |
# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR | |
# # privilege = "ADMINISTRATOR" | |
# ## | |
# ## optionally specify one or more servers via a url matching | |
# ## [username[:password]@][protocol[(address)]] | |
# ## e.g. | |
# ## root:passwd@lan(127.0.0.1) | |
# ## | |
# ## if no servers are specified, local machine sensor stats will be queried | |
# ## | |
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] | |
# | |
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid | |
# ## gaps or overlap in pulled data | |
# interval = "30s" | |
# | |
# ## Timeout for the ipmitool command to complete | |
# timeout = "20s" | |
# | |
# ## Schema Version: (Optional, defaults to version 1) | |
# metric_version = 2 | |
# # Gather packets and bytes counters from Linux ipsets | |
# [[inputs.ipset]] | |
# ## By default, we only show sets which have already matched at least 1 packet. | |
# ## Set include_unmatched_sets = true to gather them all.
# include_unmatched_sets = false | |
# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") | |
# use_sudo = false | |
# ## The default timeout of 1s for ipset execution can be overridden here: | |
# # timeout = "1s" | |
# # Gather packets and bytes throughput from iptables | |
# [[inputs.iptables]] | |
# ## iptables require root access on most systems. | |
# ## Setting 'use_sudo' to true will make use of sudo to run iptables. | |
# ## Users must configure sudo to allow telegraf user to run iptables with no password. | |
# ## iptables can be restricted to only list command "iptables -nvL". | |
# use_sudo = false | |
# ## Setting 'use_lock' to true runs iptables with the "-w" option. | |
# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") | |
# use_lock = false | |
# ## Define an alternate executable, such as "ip6tables". Default is "iptables". | |
# # binary = "ip6tables" | |
# ## defines the table to monitor: | |
# table = "filter" | |
# ## defines the chains to monitor. | |
# ## NOTE: iptables rules without a comment will not be monitored. | |
# ## Read the plugin documentation for more information. | |
# chains = [ "INPUT" ] | |
# # Collect virtual and real server stats from Linux IPVS | |
# [[inputs.ipvs]] | |
# # no configuration | |
# # Read jobs and cluster metrics from Jenkins instances | |
# [[inputs.jenkins]] | |
# ## The Jenkins URL | |
# url = "http://my-jenkins-instance:8080" | |
# # username = "admin" | |
# # password = "admin" | |
# | |
# ## Set response_timeout | |
# response_timeout = "5s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false | |
# | |
# ## Optional Max Job Build Age filter | |
# ## Default 1 hour, ignore builds older than max_build_age | |
# # max_build_age = "1h" | |
# | |
# ## Optional Sub Job Depth filter | |
# ## Jenkins can have unlimited layers of sub jobs
# ## This config will limit the layers of pulling; the default value 0 means
# ## unlimited pulling until there are no more sub jobs
# # max_subjob_depth = 0 | |
# | |
# ## Optional Sub Job Per Layer | |
# ## In workflow-multibranch-plugin, each branch will be created as a sub job. | |
# ## This config will limit the calls to only the latest branches in each layer;
# ## empty will use the default value 10
# # max_subjob_per_layer = 10 | |
# | |
# ## Jobs to exclude from gathering | |
# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] | |
# | |
# ## Nodes to exclude from gathering | |
# # node_exclude = [ "node1", "node2" ] | |
# | |
# ## Worker pool for jenkins plugin only | |
# ## Leaving this field empty will use the default value 5
# # max_connections = 5 | |
# # Read JMX metrics through Jolokia | |
# [[inputs.jolokia]] | |
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the | |
# # jolokia2 plugin | |
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 | |
# | |
# ## This is the context root used to compose the jolokia url | |
# ## NOTE that Jolokia requires a trailing slash at the end of the context root | |
# ## NOTE that your jolokia security policy must allow for POST requests. | |
# context = "/jolokia/" | |
# | |
# ## This specifies the mode used | |
# # mode = "proxy" | |
# # | |
# ## When in proxy mode this section is used to specify further | |
# ## proxy address configurations. | |
# ## Remember to change host address to fit your environment. | |
# # [inputs.jolokia.proxy] | |
# # host = "127.0.0.1" | |
# # port = "8080" | |
# | |
# ## Optional http timeouts | |
# ## | |
# ## response_header_timeout, if non-zero, specifies the amount of time to wait | |
# ## for a server's response headers after fully writing the request. | |
# # response_header_timeout = "3s" | |
# ## | |
# ## client_timeout specifies a time limit for requests made by this client. | |
# ## Includes connection time, any redirects, and reading the response body. | |
# # client_timeout = "4s" | |
# | |
# ## Attribute delimiter | |
# ## | |
# ## When multiple attributes are returned for a single | |
# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric | |
# ## name, and the attribute name, separated by the given delimiter. | |
# # delimiter = "_" | |
# | |
# ## List of servers exposing jolokia read service | |
# [[inputs.jolokia.servers]] | |
# name = "as-server-01" | |
# host = "127.0.0.1" | |
# port = "8080" | |
# # username = "myuser" | |
# # password = "mypassword" | |
# | |
# ## List of metrics collected on above servers | |
# ## Each metric consists of a name, a jmx path and either
# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
# [[inputs.jolokia.metrics]] | |
# name = "heap_memory_usage" | |
# mbean = "java.lang:type=Memory" | |
# attribute = "HeapMemoryUsage" | |
# | |
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]] | |
# name = "thread_count" | |
# mbean = "java.lang:type=Threading" | |
# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" | |
# | |
# ## This collects class loaded/unloaded count metrics.
# [[inputs.jolokia.metrics]] | |
# name = "class_count" | |
# mbean = "java.lang:type=ClassLoading" | |
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" | |
# # Read JMX metrics from a Jolokia REST agent endpoint | |
# [[inputs.jolokia2_agent]] | |
# # default_tag_prefix = "" | |
# # default_field_prefix = "" | |
# # default_field_separator = "." | |
# | |
# # Add agents URLs to query | |
# urls = ["http://localhost:8080/jolokia"] | |
# # username = "" | |
# # password = "" | |
# # response_timeout = "5s" | |
# | |
# ## Optional TLS config | |
# # tls_ca = "/var/private/ca.pem" | |
# # tls_cert = "/var/private/client.pem" | |
# # tls_key = "/var/private/client-key.pem" | |
# # insecure_skip_verify = false | |
# | |
# ## Add metrics to read | |
# [[inputs.jolokia2_agent.metric]] | |
# name = "java_runtime" | |
# mbean = "java.lang:type=Runtime" | |
# paths = ["Uptime"] | |
# # Read JMX metrics from a Jolokia REST proxy endpoint | |
# [[inputs.jolokia2_proxy]] | |
# # default_tag_prefix = "" | |
# # default_field_prefix = "" | |
# # default_field_separator = "." | |
# | |
# ## Proxy agent | |
# url = "http://localhost:8080/jolokia" | |
# # username = "" | |
# # password = "" | |
# # response_timeout = "5s" | |
# | |
# ## Optional TLS config | |
# # tls_ca = "/var/private/ca.pem" | |
# # tls_cert = "/var/private/client.pem" | |
# # tls_key = "/var/private/client-key.pem" | |
# # insecure_skip_verify = false | |
# | |
# ## Add proxy targets to query | |
# # default_target_username = "" | |
# # default_target_password = "" | |
# [[inputs.jolokia2_proxy.target]] | |
# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" | |
# # username = "" | |
# # password = "" | |
# | |
# ## Add metrics to read | |
# [[inputs.jolokia2_proxy.metric]] | |
# name = "java_runtime" | |
# mbean = "java.lang:type=Runtime" | |
# paths = ["Uptime"] | |
# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints | |
# [[inputs.kapacitor]] | |
# ## Multiple URLs from which to read Kapacitor-formatted JSON | |
# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". | |
# urls = [ | |
# "http://localhost:9092/kapacitor/v1/debug/vars" | |
# ] | |
# | |
# ## Time limit for http requests | |
# timeout = "5s" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Get kernel statistics from /proc/vmstat | |
# [[inputs.kernel_vmstat]] | |
# # no configuration | |
# # Read status information from one or more Kibana servers | |
# [[inputs.kibana]] | |
# ## specify a list of one or more Kibana servers | |
# servers = ["http://localhost:5601"] | |
# | |
# ## Timeout for HTTP requests | |
# timeout = "5s" | |
# | |
# ## HTTP Basic Auth credentials | |
# # username = "username" | |
# # password = "pa$$word" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
# # Read metrics from the Kubernetes api | |
# [[inputs.kube_inventory]] | |
# ## URL for the Kubernetes API | |
# url = "https://127.0.0.1" | |
# | |
# ## Namespace to use. Set to "" to use all namespaces. | |
# # namespace = "default" | |
# | |
# ## Use bearer token for authorization. ('bearer_token' takes priority) | |
# # bearer_token = "/path/to/bearer/token" | |
# ## OR | |
# # bearer_token_string = "abc_123" | |
# | |
# ## Set response_timeout (default 5 seconds) | |
# # response_timeout = "5s" | |
# | |
# ## Optional Resources to exclude from gathering | |
# ## Leave blank to try to gather everything available.
# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" | |
# # resource_exclude = [ "deployments", "nodes", "statefulsets" ] | |
# | |
# ## Optional Resources to include when gathering | |
# ## Overrides resource_exclude if both set. | |
# # resource_include = [ "deployments", "nodes", "statefulsets" ] | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/path/to/cafile" | |
# # tls_cert = "/path/to/certfile" | |
# # tls_key = "/path/to/keyfile" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |
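#
# ## A minimal in-cluster sketch (illustrative, not from the stock config):
# ## collect only pod and node inventory, authenticating with the pod's
# ## service-account token. The URL and secret paths are the Kubernetes
# ## defaults and may differ in your cluster.
# # url = "https://kubernetes.default.svc"
# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
# # tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
# # resource_include = [ "pods", "nodes" ]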

# # Read metrics from the Kubernetes kubelet API
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://127.0.0.1:10255"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/path/to/cafile"
# # tls_cert = "/path/to/certfile"
# # tls_key = "/path/to/keyfile"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
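#
# ## Illustrative sketch (an assumption, not stock config): many clusters
# ## disable the read-only port 10255, leaving only the authenticated kubelet
# ## port 10250. Supply tls_ca instead of skipping verification when the
# ## kubelet certificate chains to a trusted CA.
# # url = "https://127.0.0.1:10250"
# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
# # insecure_skip_verify = true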

# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URLs of the form:
# ## host [ ":" port]
# servers = ["127.0.0.1:4020"]

# # Provides Linux sysctl fs metrics
# [[inputs.linux_sysctl_fs]]
# # no configuration

# # Read metrics exposed by Logstash
# [[inputs.logstash]]
# ## The URL of the exposed Logstash API endpoint.
# url = "http://127.0.0.1:9600"
# ## Use the Logstash 5 single-pipeline API; set to true when monitoring
# ## Logstash 5.
# # single_pipeline = false
#
# ## Enable optional collection components. Can contain
# ## "pipelines", "process", and "jvm".
# # collect = ["pipelines", "process", "jvm"]
#
# ## Timeout for HTTP requests.
# # timeout = "5s"
#
# ## Optional HTTP Basic Auth credentials.
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config.
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Use TLS but skip chain & host verification.
# # insecure_skip_verify = false
#
# ## Optional HTTP headers.
# # [inputs.logstash.headers]
# # "X-Special-Header" = "Special-Value"

# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
# ## If not specified, the default will work on Lustre 2.5.x
# ##
# # ost_procfiles = [
# # "/proc/fs/lustre/obdfilter/*/stats",
# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
# # "/proc/fs/lustre/obdfilter/*/job_stats",
# # ]
# # mds_procfiles = [
# # "/proc/fs/lustre/mdt/*/md_stats",
# # "/proc/fs/lustre/mdt/*/job_stats",
# # ]

# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
# ## MailChimp API key
# ## get from https://admin.mailchimp.com/account/api/
# api_key = "" # required
# ## Reports for campaigns sent more than days_old ago will not be collected.
# ## 0 means collect all.
# days_old = 0
# ## Campaign ID to get; if empty, all campaigns are collected. This option
# ## overrides days_old.
# # campaign_id = ""

# # Retrieves information on a specific host in a MarkLogic Cluster
# [[inputs.marklogic]]
# ## Base URL of the MarkLogic HTTP Server.
# url = "http://localhost:8002"
#
# ## List of specific hostnames from which to retrieve information.
# ## At least one (1) is required.
# # hosts = ["hostname1", "hostname2"]
#
# ## HTTP Basic Authentication. The Management API requires the
# ## 'manage-user' role privileges.
# # username = "myuser" | |
# # password = "mypassword" | |
# | |
# ## Optional TLS Config | |
# # tls_ca = "/etc/telegraf/ca.pem" | |
# # tls_cert = "/etc/telegraf/cert.pem" | |
# # tls_key = "/etc/telegraf/key.pem" | |
# ## Use TLS but skip chain & host verification | |
# # insecure_skip_verify = false | |

# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
# ## An array of addresses to gather stats from. Specify an IP or hostname
# ## with port, e.g. tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
#
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"

# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of addresses to gather stats from. Specify an IP or hostname
# ## with optional port, e.g. localhost, 10.0.0.1:11211, etc.
# servers = ["localhost:11211"]
# # unix_sockets = ["/var/run/memcached.sock"]

# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
# masters = ["http://localhost:5050"]
# ## Master metrics groups to be collected; by default all are enabled.
# master_collections = [
# "resources",
# "master",
# "system",
# "agents",
# "frameworks",
# "framework_offers",
# "tasks",
# "messages",
# "evqueue",
# "registrar",
# "allocator",
# ]
# ## A list of Mesos slaves; the default is [].
# # slaves = []
# ## Slave metrics groups to be collected; by default all are enabled.
# # slave_collections = [
# # "resources",
# # "agent",
# # "system",
# # "executors",
# # "tasks",
# # "messages",
# # ]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
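#
# ## Illustrative agent-side sketch (an assumption, not stock config): when
# ## Telegraf runs on each Mesos agent, point the plugin at the local slave
# ## (default port 5051) and skip the masters entirely.
# # masters = []
# # slaves = ["http://localhost:5051"]
# # slave_collections = ["resources", "tasks"]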

# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## Address of the Minecraft server.
# # server = "localhost"
#
# ## Server RCON Port.
# # port = "25575"
#
# ## Server RCON Password.
# password = ""
# ## Uncomment to remove deprecated metric components.
# # tagexclude = ["server"]
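#
# ## Informational note: the plugin can only connect if RCON is enabled on
# ## the Minecraft server itself, e.g. in server.properties:
# ##   enable-rcon=true
# ##   rcon.port=25575
# ##   rcon.password=<set a password>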