{
  "name": "telegraf-source",
  "connector.class": "io.confluent.influxdb.source.InfluxdbSourceConnector",
  "influxdb.url": "http://localhost:8088",
  "topic.prefix": "influxdb_",
  "influxdb.db": "telegraf"
}
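For reference, a minimal sketch of the same settings wrapped in the payload shape that Kafka Connect's POST /connectors endpoint expects (the flat form above is the body that PUT /connectors/telegraf-source/config takes); the wrapper shape is standard Connect REST API usage and is not taken from the gist itself:

{
  "name": "telegraf-source",
  "config": {
    "connector.class": "io.confluent.influxdb.source.InfluxdbSourceConnector",
    "influxdb.url": "http://localhost:8088",
    "topic.prefix": "influxdb_",
    "influxdb.db": "telegraf"
  }
}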
[2019-09-17 17:06:00,033] ERROR WorkerSourceTask{id=telegraf-0} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:179)
java.lang.NullPointerException
at io.confluent.influxdb.source.InfluxKafkaMapper.loadSchema(InfluxKafkaMapper.java:164)
at io.confluent.influxdb.source.InfluxKafkaMapper.mapSeriesToRecords(InfluxKafkaMapper.java:105)
at io.confluent.influxdb.source.InfluxKafkaMapper.toSourceRecords(InfluxKafkaMapper.java:80)
at io.confluent.influxdb.source.InfluxdbSourceTask.poll(InfluxdbSourceTask.java:132)
at org.apache.kafka.connect.runtime.WorkerSourceTask.poll(WorkerSourceTask.java:259)
at org.apache.kafka.connect.runtime.WorkerSourceTask.execute(WorkerSourceTask.java:226)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:834)
[2019-09-17 17:06:00,034] ERROR WorkerSourceTask{id=telegraf-0} Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask:180)
[meta]
# Where the metadata/raft database is stored
dir = "./data/influxdb/meta"
[data]
# The directory where the TSM storage engine stores TSM files.
dir = "./data/influxdb/data"
# The directory where the TSM storage engine stores WAL files.
wal-dir = "./data/influxdb/wal"
[coordinator]
[retention]
[shard-precreation]
[monitor]
[http]
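# The HTTP API is left at its defaults here. The commented values below are a
# minimal sketch of the usual InfluxDB 1.x defaults, shown for reference only
# and not part of the original gist; they govern the HTTP endpoint that
# clients such as Telegraf and the InfluxDB source connector talk to.
# enabled = true
# bind-address = ":8086"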
[logging]
[subscriber]
[[graphite]]
[[collectd]]
[[opentsdb]]
[[udp]]
[continuous_queries]
[tls]
# Global tags can be specified here in key="value" format.
[global_tags]
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp resolution as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log file name, the empty string means to log to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to; if there is no log activity, rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Override the default hostname; if empty, os.Hostname() is used
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
urls = ["http://127.0.0.1:8086"]
database = "telegraf"
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
brokers = ["localhost:9092"]
topic = "telegraf"
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
## Uncomment to remove deprecated metrics.
# fielddrop = ["uptime_format"]