#
# kafka global settings
#
[kafka-global]
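#
# Note: this file is read by the Fastcapa application. The commented sketch below is a
# hedged illustration of how a key/value pair from this section could be applied to a
# librdkafka producer's global configuration; the parsing and error handling shown are
# assumptions for illustration, not Fastcapa's actual implementation.
#
#   #include <librdkafka/rdkafka.h>
#   #include <stdio.h>
#
#   /* create a global configuration object and apply one key/value pair from [kafka-global] */
#   rd_kafka_conf_t *conf = rd_kafka_conf_new();
#   char errstr[512];
#   if (rd_kafka_conf_set(conf, "metadata.broker.list",
#                         "y137.l42scl.hortonworks.com:9092",
#                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
#       fprintf(stderr, "config error: %s\n", errstr);
#   }
#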
# Protocol used to communicate with brokers.
# Type: enum value { plaintext, ssl, sasl_plaintext, sasl_ssl }
#security.protocol = PLAINTEXT
# Initial list of brokers as a CSV list of broker host or host:port.
# Type: string
metadata.broker.list = y137.l42scl.hortonworks.com:9092
# Client identifier.
# Type: string
client.id = metron-fastcapa
# Maximum number of messages allowed on the producer queue.
# Type: integer
# Default: 100000
queue.buffering.max.messages = 5000000
# Maximum total message size sum, in kilobytes, allowed on the producer queue.
# Type: integer
# Default: 4000000
queue.buffering.max.kbytes = 4000000
# Maximum time, in milliseconds, for buffering data on the producer queue.
# Type: integer
# Default: 1000
queue.buffering.max.ms = 3000
# Maximum size for a message to be copied to the buffer. Messages larger than this will be
# passed by reference (zero-copy) at the expense of larger iovecs.
# Type: integer
# Default: 65535
message.copy.max.bytes = 65535
# Compression codec to use for compressing message sets. This is the default value
# for all topics and may be overridden by the topic configuration property compression.codec.
# Type: enum value { none, gzip, snappy, lz4 }
compression.codec = snappy
# Maximum number of messages batched in one MessageSet. The total MessageSet size is
# also limited by message.max.bytes.
# Increase for better compression.
# Type: integer
batch.num.messages = 500000
# How many times to retry sending a failing MessageSet. Note: retrying may cause reordering.
# Type: integer
message.send.max.retries = 5
# The backoff time in milliseconds before retrying a message send.
# Type: integer
# Default: 100
#retry.backoff.ms = 500
# Statistics emit interval. The application also needs to register a stats callback
# using rd_kafka_conf_set_stats_cb(). The granularity is 1000ms. A value of 0 disables statistics.
# Type: integer
# Default: 0
statistics.interval.ms = 5000
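#
# The stats callback mentioned above is registered on the global configuration before the
# producer is created. The commented sketch below uses the librdkafka C API; the callback
# name and how the JSON payload is handled are illustrative assumptions.
#
#   /* called every statistics.interval.ms with a JSON document of producer metrics */
#   static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
#       fprintf(stderr, "librdkafka stats: %.*s\n", (int) json_len, json);
#       return 0;  /* returning 0 lets librdkafka free the JSON buffer */
#   }
#
#   /* register on the rd_kafka_conf_t before calling rd_kafka_new() */
#   rd_kafka_conf_set_stats_cb(conf, stats_cb);
#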
# Only provide delivery reports for failed messages.
# Type: boolean
# Default: false
delivery.report.only.error = false
#
# kafka topic settings
#
[kafka-topic]
# This field indicates how many acknowledgements the leader broker must receive from ISR brokers
# before responding to the request:
#   0 = Broker does not send any response/ack to the client.
#   1 = Only the leader broker must ack the message.
#   -1 or all = Broker blocks until the message is committed by all in-sync replicas (ISRs),
#               or by the number required by the broker's min.insync.replicas setting, before responding.
# Type: integer
request.required.acks = 1
# Local message timeout. This value is only enforced locally and limits the time a produced message
# waits for successful delivery. A time of 0 is infinite.
# Type: integer
# Default: 300000
message.timeout.ms = 300000
# Report the offset of the produced message back to the application. The application must use the
# dr_msg_cb to retrieve the offset from rd_kafka_message_t.offset.
# Type: boolean
# Default: false
produce.offset.report = false
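#
# As noted above, produced offsets (and per-message delivery errors) are reported through the
# delivery report callback. The commented sketch below uses the librdkafka C API; the callback
# name and the logging are illustrative assumptions.
#
#   /* invoked once per produced message after delivery succeeds or permanently fails */
#   static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
#       if (rkmessage->err)
#           fprintf(stderr, "delivery failed: %s\n", rd_kafka_err2str(rkmessage->err));
#       else
#           fprintf(stderr, "delivered at offset %lld\n", (long long) rkmessage->offset);
#   }
#
#   /* register on the rd_kafka_conf_t before rd_kafka_new(); the application must call
#      rd_kafka_poll() regularly so that delivery reports are served */
#   rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
#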