# dd-agent config
[Main]
# The host of the Datadog intake server to send Agent data to
dd_url: https://app.datadoghq.com
# If you need a proxy to connect to the Internet, provide the settings here
# proxy_host: my-proxy.com
# proxy_port: 3128
# proxy_user: user
# proxy_password: password
# To be used with some proxies that return a 302, which makes curl switch from POST to GET
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
# proxy_forbid_method_switch: no
# If you run the agent behind haproxy, you might want to set this to yes
# skip_ssl_validation: no
# The Datadog api key to associate your Agent's data with your organization.
# Can be found here:
# https://app.datadoghq.com/account/settings
api_key:
# Force the hostname to whatever you want.
#hostname: mymachine.mydomain
# Set the host's tags
#tags: mytag0, mytag1
# Add one "dd_check:checkname" tag per running check. It makes it possible to slice
# and dice per monitored app (= running Agent Check) on Datadog's backend.
# create_dd_check_tags: no
# Collect AWS EC2 custom tags as agent tags
# collect_ec2_tags: no
# Enable Agent Developer Mode
# Agent Developer Mode collects and sends more fine-grained metrics about agent and check performance
# developer_mode: no
# In developer mode, the number of runs to be included in a single collector profile
# collector_profile_interval: 20
# Collect instance metadata
# The Agent will try to collect instance metadata for EC2 and GCE instances by
# trying to connect to the local endpoint: http://169.254.169.254
# See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
# and https://developers.google.com/compute/docs/metadata
# for more information
# collect_instance_metadata: yes
# Use a unique hostname for GCE hosts, see http://dtdg.co/1eAynZk
gce_updated_hostname: yes
# Set the threshold (in seconds) for accepting metric points: anything
# older than recent_point_threshold seconds is discarded
# Defaults to 30 seconds if no value is provided
# recent_point_threshold: 30
# Use mount points instead of volumes to track disk and fs metrics
# DEPRECATED: use conf.d/disk.yaml instead to configure it
use_mount: no
# Change port the Agent is listening to
# listen_port: 17123
# Start a graphite listener on this port
# graphite_listen_port: 17124
# Additional directory to look for Datadog checks
# additional_checksd: /etc/dd-agent/checks.d/
# Allow non-local traffic to this Agent
# This is required when using this Agent as a proxy for other Agents
# that might not have an internet connection
# For more information, please see
# https://github.com/DataDog/dd-agent/wiki/Network-Traffic-and-Proxy-Configuration
# non_local_traffic: no
# Select the Tornado HTTP client in the forwarder
# Defaults to the simple HTTP client
# use_curl_http_client: False
# The loopback address the Forwarder and Dogstatsd will bind to.
# Optional; it is mainly used when running the agent on OpenShift
# bind_host: localhost
# If enabled the collector will capture a metric for check run times.
# check_timings: no
# Remove the 'ww' flag from ps so that process arguments are not collected,
# for instance for security reasons
# exclude_process_args: no
# Aggregates and percentiles to compute for histogram metrics
# histogram_aggregates: max, median, avg, count
# histogram_percentiles: 0.95
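# For example, with the defaults above a histogram metric `my.hist` is
# reported as my.hist.max, my.hist.median, my.hist.avg, my.hist.count
# and my.hist.95percentile (assuming standard dogstatsd histogram naming)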
# ========================================================================== #
# DogStatsd configuration #
# ========================================================================== #
# If you don't want to enable the DogStatsd server, set this option to no
# use_dogstatsd: yes
# DogStatsd is a small server that aggregates your custom app metrics. For
# usage information, check out http://api.datadoghq.com
# Make sure your client is sending to the same port.
# dogstatsd_port: 8125
# By default dogstatsd will post aggregate metrics to the Agent (which handles
# errors/timeouts/retries/etc). To send directly to the datadog api, set this
# to https://app.datadoghq.com.
# dogstatsd_target: http://localhost:17123
# If you want to forward every packet received by the dogstatsd server
# to another statsd server, uncomment these lines.
# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets,
# as your other statsd server might not be able to handle them.
# statsd_forward_host: address_of_own_statsd_server
# statsd_forward_port: 8125
# You may want all statsd metrics coming from this host to be namespaced
# in some way; if so, configure your namespace here. A metric that looks
# like `metric.name` will instead become `namespace.metric.name`
# statsd_metric_namespace:
# By default, dogstatsd supports only plain ASCII packets. However, most
# (dog)statsd clients support UTF-8 by encoding packets before sending them;
# this option enables UTF-8 decoding in case you need it.
# However, it comes with a performance overhead of ~10% in the dogstatsd
# server. This will be handled properly in the next-generation agent core.
# utf8_decoding: false
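#
# For reference, a dogstatsd packet is a plain statsd packet with an
# optional tag extension: "metric.name:value|type|#tag1:val1,tag2".
# A minimal sketch of sending one over UDP with Python's standard
# library (the metric name and tag below are made up for illustration):
#
#   import socket
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   # Increment a counter by 1, tagged with env:prod
#   sock.sendto(b"my.app.page_views:1|c|#env:prod", ("localhost", 8125))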
# ========================================================================== #
# Service-specific configuration #
# ========================================================================== #
# -------------------------------------------------------------------------- #
# Disk #
# -------------------------------------------------------------------------- #
# Some infrastructures have many constantly changing virtual devices (e.g. folks
# running constantly churning linux containers) whose metrics aren't
# interesting for datadog. To filter out a particular pattern of devices
# from collection, configure a regex here:
# DEPRECATED: use conf.d/disk.yaml instead to configure it
# device_blacklist_re: .*\/dev\/mapper\/lxc-box.*
# -------------------------------------------------------------------------- #
# Ganglia #
# -------------------------------------------------------------------------- #
# Ganglia host where gmetad is running
#ganglia_host: localhost
# Ganglia port where gmetad is running
#ganglia_port: 8651
# -------------------------------------------------------------------------- #
# Dogstream (log file parser)
# -------------------------------------------------------------------------- #
# Comma-separated list of logs to parse and optionally custom parsers to use.
# The form should look like this:
#
# dogstreams: /path/to/log1:parsers_module:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Or this:
#
# dogstreams: /path/to/log1:/path/to/my/parsers_module.py:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Each entry is a path to a log file and optionally a Python module/function pair
# separated by colons.
#
# Custom parsers should take 2 parameters: a logger object and
# a string containing the current line to parse. They should return a tuple
# of the form:
# (metric (str), timestamp (unix timestamp), value (float), attributes (dict))
# where attributes should at least contain the key 'metric_type', specifying
# whether the given metric is a 'counter' or 'gauge'.
#
# Unless parsers are specified with an absolute path, the modules must exist in
# the Agent's PYTHONPATH. You can set this as an environment variable when
# starting the Agent. If the name of the custom parser function is not passed,
# 'parser' is assumed.
#
# If this value isn't specified, the default parser assumes this log format:
# metric timestamp value key0=val0 key1=val1 ...
#
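# A minimal sketch of a custom parser module matching the contract above
# (the module path, metric name and log line format are illustrative):
#
#   # /path/to/my/parsers_module.py
#   import time
#
#   def custom_parser(logger, line):
#       # Parse a line like "users.active 42" into a gauge point,
#       # stamped with the current time since this format has no timestamp
#       metric, value = line.split()
#       return (metric, int(time.time()), float(value),
#               {'metric_type': 'gauge'})
#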
# ========================================================================== #
# Custom Emitters #
# ========================================================================== #
# Comma-separated list of emitters to be used in addition to the standard one
#
# Expected to be passed as a comma-separated list of colon-delimited
# name/object pairs.
#
# custom_emitters: /usr/local/my-code/emitters/rabbitmq.py:RabbitMQEmitter
#
# If the name of the emitter function is not specified, 'emitter' is assumed.
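#
# A minimal sketch of the emitter from the example above. The signature is
# assumed to match the agent's built-in HTTP emitter: it is called with the
# aggregated payload (a dict), a logger, and the agent configuration.
#
#   # /usr/local/my-code/emitters/rabbitmq.py
#   import json
#
#   def RabbitMQEmitter(message, log, agentConfig):
#       # Signature assumed; message is the aggregated payload dict
#       payload = json.dumps(message, default=str)
#       log.info("forwarding %d bytes of agent payload", len(payload))
#       # ... publish `payload` to your own transport here ...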
# ========================================================================== #
# Logging
# ========================================================================== #
# log_level: INFO
# collector_log_file: /var/log/datadog/collector.log
# forwarder_log_file: /var/log/datadog/forwarder.log
# dogstatsd_log_file: /var/log/datadog/dogstatsd.log
# If syslog is enabled but a host and port are not set, a local domain socket
# connection will be attempted
#
# log_to_syslog: yes
# syslog_host:
# syslog_port: