### Welcome to the InfluxDB configuration file.

# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false

###
### Enterprise registration control
###

[registration]
# enabled = true
# url = "https://enterprise.influxdata.com" # The Enterprise server URL
# token = "" # Registration token for Enterprise server

###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[meta]
dir = "/Users/ryan/c/go/var/opt/influxdb/meta"
hostname = "localhost"
bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[data]
dir = "/Users/ryan/c/go/var/opt/influxdb/data"

# Controls the engine type for new shards. Options are b1, bz1, or tsm1.
# b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
# tsm1 is the 0.9.5 engine and is currently EXPERIMENTAL. Until 0.9.5 is
# actually released, data written into a tsm1 engine may need to be wiped
# between upgrades.
engine = "tsm1"

# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.

# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/Users/ryan/c/go/var/opt/influxdb/wal"
wal-enable-logging = true

# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
# flush to the index
# wal-ready-series-size = 25600

# Flush and compact a partition once this ratio of series are over the ready size
# wal-compaction-threshold = 0.6

# Force a flush and compaction if any series in a partition gets above this size in bytes
# wal-max-series-size = 2097152

# Force a flush of all series and full compaction if there have been no writes in this
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL.
# wal-flush-cold-interval = "10m"

# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520
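# Worked example of the sizing rule above: with the default threshold of
# 20971520 bytes (20 MB) and the 5 partitions mentioned above, budget at
# least 5 * 20 MB = 100 MB of memory for WAL partitions alone.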

# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
enabled = true
dir = "/Users/ryan/c/go/var/opt/influxdb/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0

# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will back off exponentially until the interval
# reaches retry-max-interval. Once writes to all nodes have completed successfully, the
# interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"
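# Worked example (assuming a doubling backoff, which is an implementation
# detail not specified here): with the settings above, consecutive failed
# retries to a down node would be spaced roughly 1s, 2s, 4s, 8s, ... up to
# the 1m cap, then every 1m until a write succeeds, at which point the
# interval resets to 1s.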

# Interval between running checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons: 1) the data is older than the max age, or
# 2) the target node has been dropped from the cluster. Note, however, that data is
# never dropped until it has reached max-age, whether the target node has been dropped
# or not.
purge-interval = "1h"

###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###

[cluster]
shard-writer-timeout = "10s" # The time within which a shard must respond to a write.
write-timeout = "5s" # The time within which a write operation must complete on the cluster.

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###

[retention]
enabled = true
check-interval = "30m"

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are created before data arrives.
### Only shards that will exist in the future, at time of creation, are precreated.
###

[shard-precreation]
enabled = true
check-interval = "10m"
advance-period = "30m"
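# Worked example: with the settings above, every 10 minutes the precreation
# service looks ahead 30 minutes and creates any shard whose start time falls
# within that window, so the shard already exists when its first point arrives.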

###
### [monitor]
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically if
### it does not already exist. The target retention policy within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases this
### retention policy is configured as the default for the database.
###

[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics.
store-interval = "10s" # The interval at which to record statistics.

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###

[admin]
enabled = true
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###

[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
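
# Example usage (a sketch; the database name "mydb" is an assumption and must
# already exist): writes arrive on this endpoint as line protocol, e.g.
#   curl -i -XPOST 'http://localhost:8086/write?db=mydb' \
#     --data-binary 'cpu,host=server01 value=0.64'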

###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###

[[graphite]]
enabled = false
# database = "graphite"
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# name-separator = "."

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

## "name-schema" configures tag names for parsing the metric name from the graphite
## protocol; fields are separated by `name-separator`.
## The "measurement" tag is special and the corresponding field will become
## the name of the metric.
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
## {
##     measurement: "cpu",
##     tags: {
##         "type": "server",
##         "host": "localhost",
##         "device": "cpu0"
##     }
## }
# name-schema = "type.host.measurement.device"

## If set to true, when the input metric name has more fields than `name-schema` specifies,
## the extra fields will be ignored.
## Otherwise an error will be logged and the metric rejected.
# ignore-unnamed = true
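
# Example (a sketch using the schema above): Graphite's plaintext protocol
# sends one "<name> <value> <timestamp>" triple per line, so
#   server.localhost.cpu.cpu0 42 1444087200
# would be stored as measurement "cpu" with tags type=server, host=localhost,
# device=cpu0 and a value of 42.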

###
### [collectd]
###
### Controls the listener for collectd data.
###

[collectd]
enabled = false
# bind-address = ""
# database = ""
# typesdb = ""

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###

[opentsdb]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate = ""

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Only points
# received over the telnet protocol undergo batching.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
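
# Example (a sketch; assumes the listener is enabled on the default :4242):
# OpenTSDB's telnet protocol sends "put <metric> <timestamp> <value> <tags>",
# e.g.
#   put sys.cpu.user 1444087200 42 host=web01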

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
enabled = false
# bind-address = ""
# database = "udp"
# retention-policy = ""

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
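
# Example (a sketch; assumes this listener is enabled and bound to :8089,
# which is not set above, and a BSD-style netcat): line protocol points can
# be sent over UDP with, e.g.
#   echo "cpu,host=server01 value=0.64" | nc -u -w1 localhost 8089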

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
log-enabled = true
enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m"
compute-runs-per-interval = 10
compute-no-more-than = "2m"
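
# Example (a sketch; the database "mydb" and the "cpu" measurement are
# assumptions): a continuous query governed by these settings might look like
#   CREATE CONTINUOUS QUERY cpu_mean_5m ON mydb BEGIN
#     SELECT mean(value) INTO cpu_mean_5m FROM cpu GROUP BY time(5m)
#   END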