@twashing
Created April 29, 2017 18:14
simplekafkaonyxcommander kafka logs
kafka_1 | ===> ENV Variables ...
kafka_1 |
kafka_1 | echo "===> ENV Variables ..."
kafka_1 | + echo '===> ENV Variables ...'
kafka_1 | env | sort
kafka_1 | + env
kafka_1 | + sort
kafka_1 | COMPONENT=kafka
kafka_1 | CONFLUENT_DEB_REPO=http://packages.confluent.io
kafka_1 | CONFLUENT_DEB_VERSION=1
kafka_1 | CONFLUENT_MAJOR_VERSION=3
kafka_1 | CONFLUENT_MINOR_VERSION=1
kafka_1 | CONFLUENT_PATCH_VERSION=1
kafka_1 | CONFLUENT_VERSION=3.1.1
kafka_1 | HOME=/root
kafka_1 | HOSTNAME=c4e002eac37b
kafka_1 | KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
kafka_1 | KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
kafka_1 | KAFKA_BROKER_ID=0
kafka_1 | KAFKA_VERSION=0.10.1.0
kafka_1 | KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
kafka_1 | LANG=C.UTF-8
kafka_1 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
kafka_1 | PWD=/
kafka_1 | PYTHON_PIP_VERSION=8.1.2
kafka_1 | PYTHON_VERSION=2.7.9-1
kafka_1 | SCALA_VERSION=2.11
kafka_1 |
kafka_1 | echo "===> User"
kafka_1 | + echo '===> User'
kafka_1 | id
kafka_1 | + id
kafka_1 |
kafka_1 | echo "===> Configuring ..."
kafka_1 | + echo '===> Configuring ...'
kafka_1 | /etc/confluent/docker/configure
kafka_1 | + /etc/confluent/docker/configure
kafka_1 | SHLVL=1
kafka_1 | ZULU_OPENJDK_VERSION=8=8.17.0.3
kafka_1 | _=/usr/bin/env
kafka_1 | no_proxy=*.local, 169.254/16
kafka_1 | ===> User
kafka_1 | uid=0(root) gid=0(root) groups=0(root)
kafka_1 | ===> Configuring ...
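The KAFKA_* variables listed above are injected into the container by the compose file. A minimal sketch of an equivalent docker run invocation, assuming the image is confluentinc/cp-kafka:3.1.1 and assuming a user-defined network so that the names kafka and zookeeper resolve; the -e values are taken straight from the environment dump above:

    # Hypothetical stand-in for the compose service that produced the environment above.
    docker network create skc_default   # assumed network name
    docker run -d --name kafka --network skc_default \
      -e KAFKA_BROKER_ID=0 \
      -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
      -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 \
      -e KAFKA_AUTO_CREATE_TOPICS_ENABLE=true \
      confluentinc/cp-kafka:3.1.1
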
kafka_1 |
kafka_1 | dub ensure KAFKA_ZOOKEEPER_CONNECT
kafka_1 | + dub ensure KAFKA_ZOOKEEPER_CONNECT
kafka_1 | dub ensure KAFKA_ADVERTISED_LISTENERS
kafka_1 | + dub ensure KAFKA_ADVERTISED_LISTENERS
kafka_1 |
kafka_1 | # By default, LISTENERS is derived from ADVERTISED_LISTENERS by replacing
kafka_1 | # hosts with 0.0.0.0. This is good default as it ensures that the broker
kafka_1 | # process listens on all ports.
kafka_1 | if [[ -z "${KAFKA_LISTENERS-}" ]]
kafka_1 | then
kafka_1 | export KAFKA_LISTENERS
kafka_1 | KAFKA_LISTENERS=$(cub listeners "$KAFKA_ADVERTISED_LISTENERS")
kafka_1 | fi
kafka_1 | + [[ -z '' ]]
kafka_1 | + export KAFKA_LISTENERS
kafka_1 | cub listeners "$KAFKA_ADVERTISED_LISTENERS"
kafka_1 | ++ cub listeners PLAINTEXT://kafka:9092
kafka_1 | + KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
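As the script comment says, the cub listeners helper derives KAFKA_LISTENERS by replacing each host in KAFKA_ADVERTISED_LISTENERS with 0.0.0.0 so the broker binds on every interface. A rough shell approximation of that rewrite (cub itself is a Python utility in the image, so the sed mechanics here are an assumption, not its actual implementation):

    # Approximation only: PLAINTEXT://kafka:9092 -> PLAINTEXT://0.0.0.0:9092
    KAFKA_LISTENERS=$(echo "$KAFKA_ADVERTISED_LISTENERS" | sed -E 's#://[^:,]+:#://0.0.0.0:#g')
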
kafka_1 |
kafka_1 | dub path /etc/kafka/ writable
kafka_1 | + dub path /etc/kafka/ writable
kafka_1 |
kafka_1 | if [[ -z "${KAFKA_LOG_DIRS-}" ]]
kafka_1 | then
kafka_1 | export KAFKA_LOG_DIRS
kafka_1 | KAFKA_LOG_DIRS="/var/lib/kafka/data"
kafka_1 | fi
kafka_1 | + [[ -z '' ]]
kafka_1 | + export KAFKA_LOG_DIRS
kafka_1 | + KAFKA_LOG_DIRS=/var/lib/kafka/data
kafka_1 |
kafka_1 | # advertised.host, advertised.port, host and port are deprecated. Exit if these properties are set.
kafka_1 | if [[ -n "${KAFKA_ADVERTISED_PORT-}" ]]
kafka_1 | then
kafka_1 | echo "advertised.port is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead."
kafka_1 | exit 1
kafka_1 | fi
kafka_1 | + [[ -n '' ]]
kafka_1 |
kafka_1 | if [[ -n "${KAFKA_ADVERTISED_HOST-}" ]]
kafka_1 | then
kafka_1 | echo "advertised.host is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead."
kafka_1 | exit 1
kafka_1 | fi
kafka_1 | + [[ -n '' ]]
kafka_1 |
kafka_1 | if [[ -n "${KAFKA_HOST-}" ]]
kafka_1 | then
kafka_1 | echo "host is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead."
kafka_1 | exit 1
kafka_1 | fi
kafka_1 | + [[ -n '' ]]
kafka_1 |
kafka_1 | if [[ -n "${KAFKA_PORT-}" ]]
kafka_1 | then
kafka_1 | echo "port is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead."
kafka_1 | exit 1
kafka_1 | fi
kafka_1 | + [[ -n '' ]]
kafka_1 |
kafka_1 | # Set if ADVERTISED_LISTENERS has SSL:// or SASL_SSL:// endpoints.
kafka_1 | if [[ $KAFKA_ADVERTISED_LISTENERS == *"SSL://"* ]]
kafka_1 | then
kafka_1 | echo "SSL is enabled."
kafka_1 |
kafka_1 | dub ensure KAFKA_SSL_KEYSTORE_FILENAME
kafka_1 | export KAFKA_SSL_KEYSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_FILENAME"
kafka_1 | dub path "$KAFKA_SSL_KEYSTORE_LOCATION" exists
kafka_1 |
kafka_1 | dub ensure KAFKA_SSL_KEY_CREDENTIALS
kafka_1 | KAFKA_SSL_KEY_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEY_CREDENTIALS"
kafka_1 | dub path "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION" exists
kafka_1 | export KAFKA_SSL_KEY_PASSWORD
kafka_1 | KAFKA_SSL_KEY_PASSWORD=$(cat "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION")
kafka_1 |
kafka_1 | dub ensure KAFKA_SSL_KEYSTORE_CREDENTIALS
kafka_1 | KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_CREDENTIALS"
kafka_1 | dub path "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION" exists
kafka_1 | export KAFKA_SSL_KEYSTORE_PASSWORD
kafka_1 | KAFKA_SSL_KEYSTORE_PASSWORD=$(cat "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION")
kafka_1 |
kafka_1 | dub ensure KAFKA_SSL_TRUSTSTORE_FILENAME
kafka_1 | export KAFKA_SSL_TRUSTSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_FILENAME"
kafka_1 | dub path "$KAFKA_SSL_TRUSTSTORE_LOCATION" exists
kafka_1 |
kafka_1 | dub ensure KAFKA_SSL_TRUSTSTORE_CREDENTIALS
kafka_1 | KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_CREDENTIALS"
kafka_1 | dub path "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION" exists
kafka_1 | export KAFKA_SSL_TRUSTSTORE_PASSWORD
kafka_1 | KAFKA_SSL_TRUSTSTORE_PASSWORD=$(cat "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION")
kafka_1 |
kafka_1 | fi
kafka_1 | + [[ PLAINTEXT://kafka:9092 == *\S\S\L\:\/\/* ]]
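This whole SSL branch is skipped because the advertised listener is PLAINTEXT. For reference, if an SSL:// or SASL_SSL:// endpoint were advertised, the script above expects the following variables, each pointing at a file mounted under /etc/kafka/secrets. The variable names are verbatim from the script; the filenames below are placeholders:

    # Only relevant when an SSL:// or SASL_SSL:// endpoint is advertised.
    export KAFKA_SSL_KEYSTORE_FILENAME=kafka.keystore.jks        # /etc/kafka/secrets/kafka.keystore.jks
    export KAFKA_SSL_KEYSTORE_CREDENTIALS=keystore_creds         # file holding the keystore password
    export KAFKA_SSL_KEY_CREDENTIALS=key_creds                   # file holding the key password
    export KAFKA_SSL_TRUSTSTORE_FILENAME=kafka.truststore.jks    # /etc/kafka/secrets/kafka.truststore.jks
    export KAFKA_SSL_TRUSTSTORE_CREDENTIALS=truststore_creds     # file holding the truststore password
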
kafka_1 |
kafka_1 | # Set if KAFKA_ADVERTISED_LISTENERS has SSL:// or SASL_SSL:// endpoints.
kafka_1 | if [[ $KAFKA_ADVERTISED_LISTENERS =~ .*SASL_.*://.* ]]
kafka_1 | then
kafka_1 | echo "SASL" is enabled.
kafka_1 |
kafka_1 | dub ensure KAFKA_OPTS
kafka_1 |
kafka_1 | if [[ ! $KAFKA_OPTS == *"java.security.auth.login.config"* ]]
kafka_1 | then
kafka_1 | echo "KAFKA_OPTS should contain 'java.security.auth.login.config' property."
kafka_1 | fi
kafka_1 | fi
kafka_1 | + [[ PLAINTEXT://kafka:9092 =~ .*SASL_.*://.* ]]
kafka_1 |
kafka_1 | if [[ -n "${KAFKA_JMX_OPTS-}" ]]
kafka_1 | then
kafka_1 | if [[ ! $KAFKA_JMX_OPTS == *"com.sun.management.jmxremote.rmi.port"* ]]
kafka_1 | then
kafka_1 | echo "KAFKA_OPTS should contain 'com.sun.management.jmxremote.rmi.port' property. It is required for accessing the JMX metrics externally."
kafka_1 | fi
kafka_1 | fi
kafka_1 | + [[ -n '' ]]
kafka_1 |
kafka_1 | dub template "/etc/confluent/docker/${COMPONENT}.properties.template" "/etc/${COMPONENT}/${COMPONENT}.properties"
kafka_1 | + dub template /etc/confluent/docker/kafka.properties.template /etc/kafka/kafka.properties
kafka_1 | dub template "/etc/confluent/docker/log4j.properties.template" "/etc/${COMPONENT}/log4j.properties"
kafka_1 | + dub template /etc/confluent/docker/log4j.properties.template /etc/kafka/log4j.properties
kafka_1 | dub template "/etc/confluent/docker/tools-log4j.properties.template" "/etc/${COMPONENT}/tools-log4j.properties"
kafka_1 | + dub template /etc/confluent/docker/tools-log4j.properties.template /etc/kafka/tools-log4j.properties
kafka_1 |
kafka_1 | echo "===> Running preflight checks ... "
kafka_1 | + echo '===> Running preflight checks ... '
kafka_1 | /etc/confluent/docker/ensure
kafka_1 | + /etc/confluent/docker/ensure
kafka_1 | ===> Running preflight checks ...
kafka_1 |
kafka_1 | export KAFKA_DATA_DIRS=${KAFKA_DATA_DIRS:-"/var/lib/kafka/data"}
kafka_1 | + export KAFKA_DATA_DIRS=/var/lib/kafka/data
kafka_1 | + KAFKA_DATA_DIRS=/var/lib/kafka/data
kafka_1 | echo "===> Check if $KAFKA_DATA_DIRS is writable ..."
kafka_1 | + echo '===> Check if /var/lib/kafka/data is writable ...'
kafka_1 | dub path "$KAFKA_DATA_DIRS" writable
kafka_1 | + dub path /var/lib/kafka/data writable
kafka_1 | ===> Check if /var/lib/kafka/data is writable ...
kafka_1 | ===> Check if Zookeeper is healthy ...
kafka_1 |
kafka_1 | echo "===> Check if Zookeeper is healthy ..."
kafka_1 | + echo '===> Check if Zookeeper is healthy ...'
kafka_1 | cub zk-ready "$KAFKA_ZOOKEEPER_CONNECT" "${KAFKA_CUB_ZK_TIMEOUT:-40}"
kafka_1 | + cub zk-ready zookeeper:2181 40
kafka_1 | Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
kafka_1 | Client environment:host.name=c4e002eac37b
kafka_1 | Client environment:java.version=1.8.0_102
kafka_1 | Client environment:java.vendor=Azul Systems, Inc.
kafka_1 | Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
kafka_1 | Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
kafka_1 | Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
kafka_1 | Client environment:java.io.tmpdir=/tmp
kafka_1 | Client environment:java.compiler=<NA>
kafka_1 | Client environment:os.name=Linux
kafka_1 | Client environment:os.arch=amd64
kafka_1 | Client environment:os.version=4.9.13-moby
kafka_1 | Client environment:user.name=root
kafka_1 | Client environment:user.home=/root
kafka_1 | Client environment:user.dir=/
kafka_1 | Initiating client connection, connectString=zookeeper:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@14514713
kafka_1 | Opening socket connection to server simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181. Will not attempt to authenticate using SASL (unknown error)
kafka_1 | Session 0x0 for server null, unexpected error, closing socket connection and attempting reconnect
kafka_1 | java.net.ConnectException: Connection refused
kafka_1 | at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
kafka_1 | at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
kafka_1 | at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:361)
kafka_1 | at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1081)
kafka_1 | Opening socket connection to server simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181. Will not attempt to authenticate using SASL (unknown error)
kafka_1 | Socket connection established to simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181, initiating session
kafka_1 | Session establishment complete on server simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181, sessionid = 0x15bbad6c6310000, negotiated timeout = 40000
kafka_1 | Session: 0x15bbad6c6310000 closed
kafka_1 | EventThread shut down
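The Connection refused above is the usual startup race between the kafka and zookeeper containers: cub zk-ready keeps retrying within the 40-second window (the default, overridable via KAFKA_CUB_ZK_TIMEOUT as shown in the script) and the second attempt succeeds. A hedged manual equivalent, assuming nc is available in the container, using ZooKeeper's four-letter-word interface:

    # Prints "imok" once zookeeper is answering on 2181.
    echo ruok | nc zookeeper 2181
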
kafka_1 |
kafka_1 | echo "===> Launching ... "
kafka_1 | + echo '===> Launching ... '
kafka_1 | exec /etc/confluent/docker/launch
kafka_1 | + exec /etc/confluent/docker/launch
kafka_1 | ===> Launching ...
kafka_1 | ===> Launching kafka ...
kafka_1 | [2017-04-29 17:51:31,643] INFO KafkaConfig values:
kafka_1 | advertised.host.name = null
kafka_1 | advertised.listeners = PLAINTEXT://kafka:9092
kafka_1 | advertised.port = null
kafka_1 | authorizer.class.name =
kafka_1 | auto.create.topics.enable = true
kafka_1 | auto.leader.rebalance.enable = true
kafka_1 | background.threads = 10
kafka_1 | broker.id = 0
kafka_1 | broker.id.generation.enable = true
kafka_1 | broker.rack = null
kafka_1 | compression.type = producer
kafka_1 | connections.max.idle.ms = 600000
kafka_1 | controlled.shutdown.enable = true
kafka_1 | controlled.shutdown.max.retries = 3
kafka_1 | controlled.shutdown.retry.backoff.ms = 5000
kafka_1 | controller.socket.timeout.ms = 30000
kafka_1 | default.replication.factor = 1
kafka_1 | delete.topic.enable = false
kafka_1 | fetch.purgatory.purge.interval.requests = 1000
kafka_1 | group.max.session.timeout.ms = 300000
kafka_1 | group.min.session.timeout.ms = 6000
kafka_1 | host.name =
kafka_1 | inter.broker.protocol.version = 0.10.1-IV2
kafka_1 | leader.imbalance.check.interval.seconds = 300
kafka_1 | leader.imbalance.per.broker.percentage = 10
kafka_1 | listeners = PLAINTEXT://0.0.0.0:9092
kafka_1 | log.cleaner.backoff.ms = 15000
kafka_1 | log.cleaner.dedupe.buffer.size = 134217728
kafka_1 | log.cleaner.delete.retention.ms = 86400000
kafka_1 | log.cleaner.enable = true
kafka_1 | log.cleaner.io.buffer.load.factor = 0.9
kafka_1 | log.cleaner.io.buffer.size = 524288
kafka_1 | log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
kafka_1 | log.cleaner.min.cleanable.ratio = 0.5
kafka_1 | log.cleaner.min.compaction.lag.ms = 0
kafka_1 | log.cleaner.threads = 1
kafka_1 | log.cleanup.policy = [delete]
kafka_1 | log.dir = /tmp/kafka-logs
kafka_1 | log.dirs = /var/lib/kafka/data
kafka_1 | log.flush.interval.messages = 9223372036854775807
kafka_1 | log.flush.interval.ms = null
kafka_1 | log.flush.offset.checkpoint.interval.ms = 60000
kafka_1 | log.flush.scheduler.interval.ms = 9223372036854775807
kafka_1 | log.index.interval.bytes = 4096
kafka_1 | log.index.size.max.bytes = 10485760
kafka_1 | log.message.format.version = 0.10.1-IV2
kafka_1 | log.message.timestamp.difference.max.ms = 9223372036854775807
kafka_1 | log.message.timestamp.type = CreateTime
kafka_1 | log.preallocate = false
kafka_1 | log.retention.bytes = -1
kafka_1 | log.retention.check.interval.ms = 300000
kafka_1 | log.retention.hours = 168
kafka_1 | log.retention.minutes = null
kafka_1 | log.retention.ms = null
kafka_1 | log.roll.hours = 168
kafka_1 | log.roll.jitter.hours = 0
kafka_1 | log.roll.jitter.ms = null
kafka_1 | log.roll.ms = null
kafka_1 | log.segment.bytes = 1073741824
kafka_1 | log.segment.delete.delay.ms = 60000
kafka_1 | max.connections.per.ip = 2147483647
kafka_1 | max.connections.per.ip.overrides =
kafka_1 | message.max.bytes = 1000012
kafka_1 | metric.reporters = []
kafka_1 | metrics.num.samples = 2
kafka_1 | metrics.sample.window.ms = 30000
kafka_1 | min.insync.replicas = 1
kafka_1 | num.io.threads = 8
kafka_1 | num.network.threads = 3
kafka_1 | num.partitions = 1
kafka_1 | num.recovery.threads.per.data.dir = 1
kafka_1 | num.replica.fetchers = 1
kafka_1 | offset.metadata.max.bytes = 4096
kafka_1 | offsets.commit.required.acks = -1
kafka_1 | offsets.commit.timeout.ms = 5000
kafka_1 | offsets.load.buffer.size = 5242880
kafka_1 | offsets.retention.check.interval.ms = 600000
kafka_1 | offsets.retention.minutes = 1440
kafka_1 | offsets.topic.compression.codec = 0
kafka_1 | offsets.topic.num.partitions = 50
kafka_1 | offsets.topic.replication.factor = 3
kafka_1 | offsets.topic.segment.bytes = 104857600
kafka_1 | port = 9092
kafka_1 | principal.builder.class = class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder
kafka_1 | producer.purgatory.purge.interval.requests = 1000
kafka_1 | queued.max.requests = 500
kafka_1 | quota.consumer.default = 9223372036854775807
kafka_1 | quota.producer.default = 9223372036854775807
kafka_1 | quota.window.num = 11
kafka_1 | quota.window.size.seconds = 1
kafka_1 | replica.fetch.backoff.ms = 1000
kafka_1 | replica.fetch.max.bytes = 1048576
kafka_1 | replica.fetch.min.bytes = 1
kafka_1 | replica.fetch.response.max.bytes = 10485760
kafka_1 | replica.fetch.wait.max.ms = 500
kafka_1 | replica.high.watermark.checkpoint.interval.ms = 5000
kafka_1 | replica.lag.time.max.ms = 10000
kafka_1 | replica.socket.receive.buffer.bytes = 65536
kafka_1 | replica.socket.timeout.ms = 30000
kafka_1 | replication.quota.window.num = 11
kafka_1 | replication.quota.window.size.seconds = 1
kafka_1 | request.timeout.ms = 30000
kafka_1 | reserved.broker.max.id = 1000
kafka_1 | sasl.enabled.mechanisms = [GSSAPI]
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000
kafka_1 | sasl.kerberos.principal.to.local.rules = [DEFAULT]
kafka_1 | sasl.kerberos.service.name = null
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8
kafka_1 | sasl.mechanism.inter.broker.protocol = GSSAPI
kafka_1 | security.inter.broker.protocol = PLAINTEXT
kafka_1 | socket.receive.buffer.bytes = 102400
kafka_1 | socket.request.max.bytes = 104857600
kafka_1 | socket.send.buffer.bytes = 102400
kafka_1 | ssl.cipher.suites = null
kafka_1 | ssl.client.auth = none
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
kafka_1 | ssl.endpoint.identification.algorithm = null
kafka_1 | ssl.key.password = null
kafka_1 | ssl.keymanager.algorithm = SunX509
kafka_1 | ssl.keystore.location = null
kafka_1 | ssl.keystore.password = null
kafka_1 | ssl.keystore.type = JKS
kafka_1 | ssl.protocol = TLS
kafka_1 | ssl.provider = null
kafka_1 | ssl.secure.random.implementation = null
kafka_1 | ssl.trustmanager.algorithm = PKIX
kafka_1 | ssl.truststore.location = null
kafka_1 | ssl.truststore.password = null
kafka_1 | ssl.truststore.type = JKS
kafka_1 | unclean.leader.election.enable = true
kafka_1 | zookeeper.connect = zookeeper:2181
kafka_1 | zookeeper.connection.timeout.ms = null
kafka_1 | zookeeper.session.timeout.ms = 6000
kafka_1 | zookeeper.set.acl = false
kafka_1 | zookeeper.sync.time.ms = 2000
kafka_1 | (kafka.server.KafkaConfig)
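Each broker property above can be overridden through the image's environment convention: KAFKA_ followed by the property name upper-cased with dots replaced by underscores, which is how KAFKA_AUTO_CREATE_TOPICS_ENABLE became auto.create.topics.enable = true in this dump. An illustrative sketch (the specific overrides are examples, not values from this log):

    # Same KAFKA_<PROPERTY> mapping seen above, e.g.
    #   KAFKA_DELETE_TOPIC_ENABLE              -> delete.topic.enable
    #   KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR -> offsets.topic.replication.factor
    docker run -d --name kafka --network skc_default \
      -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
      -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 \
      -e KAFKA_DELETE_TOPIC_ENABLE=true \
      -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
      confluentinc/cp-kafka:3.1.1
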
kafka_1 | [2017-04-29 17:51:31,738] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable)
kafka_1 | [2017-04-29 17:51:31,739] INFO starting (kafka.server.KafkaServer)
kafka_1 | [2017-04-29 17:51:31,775] INFO [ThrottledRequestReaper-Fetch], Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
kafka_1 | [2017-04-29 17:51:31,778] INFO [ThrottledRequestReaper-Produce], Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
kafka_1 | [2017-04-29 17:51:31,784] INFO Connecting to zookeeper on zookeeper:2181 (kafka.server.KafkaServer)
kafka_1 | [2017-04-29 17:51:31,805] INFO Starting ZkClient event thread. (org.I0Itec.zkclient.ZkEventThread)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:host.name=c4e002eac37b (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.version=1.8.0_102 (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.class.path=:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2-javadoc.jar:/usr/bin/../share/java/kafka/support-metrics-client-3.1.1.jar:/usr/bin/../share/java/kafka/jackson-core-2.6.3.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.6.3.jar:/usr/bin/../share/java/kafka/kafka-clients-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/commons-validator-1.4.1.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/reflections-0.9.10.jar:/usr/bin/../share/java/kafka/xz-1.0.jar:/usr/bin/../share/java/kafka/hk2-locator-2.4.0-b34.jar:/usr/bin/../share/java/kafka/connect-json-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/rocksdbjni-4.9.0.jar:/usr/bin/../share/java/kafka/jopt-simple-4.9.jar:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2-test.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/jetty-server-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/javax.inject-2.4.0-b34.jar:/usr/bin/../share/java/kafka/commons-collections-3.2.1.jar:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2-sources.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/httpcore-4.4.3.jar:/usr/bin/../share/java/kafka/jersey-server-2.22.2.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.22.2.jar:/usr/bin/../share/java/kafka/javax.inject-1.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/hk2-api-2.4.0-b34.jar:/usr/bin/../share/java/kafka/jetty-util-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/connect-file-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/commons-beanutils-1.8.3.jar:/usr/bin/../share/java/kafka/commons-codec-1.9.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.0.1.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.2.6.jar:/usr/bin/../share/java/kafka/scala-parser-combinators_2.11-1.0.4.jar:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2-test-sources.jar:/usr/bin/../share/java/kafka/connect-runtime-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/lz4-1.3.0.jar:/usr/bin/../share/java/kafka/httpclient-4.5.1.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.22.2.jar:/usr/bin/../share/java/kafka/javax.annotation-api-1.2.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.6.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-0.10.1.0-cp2-scaladoc.jar:/usr/bin/../share/java/kafka/connect-api-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/httpmime-4.5.1.jar:/usr/bin/../share/java/kafka/validation-api-1.1.0.Final.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.22.2.jar:/usr/bin/../share/java/kafka/commons-compress-1.4.1.jar:/usr/bin/../share/java/kafka/javassist-3.18.2-GA.jar:/usr/bin/../share/java/kafka/jersey-client-2.22.2.jar:/usr/bin/../share/java/kafka/argparse4j-0.5.0.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.6.3.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.8.jar:/usr/bin/../share/java/kafka/jetty-io-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/jersey-common-2.22.2.jar:/usr/bin/../share/java/kafka/scala-library-2.11.8.jar:/usr/bin/../
share/java/kafka/aopalliance-repackaged-2.4.0-b34.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/paranamer-2.3.jar:/usr/bin/../share/java/kafka/jetty-http-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.6.3.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.21.jar:/usr/bin/../share/java/kafka/jackson-databind-2.6.3.jar:/usr/bin/../share/java/kafka/hk2-utils-2.4.0-b34.jar:/usr/bin/../share/java/kafka/zkclient-0.9.jar:/usr/bin/../share/java/kafka/kafka-streams-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/kafka-tools-0.10.1.0-cp2.jar:/usr/bin/../share/java/kafka/commons-digester-1.8.1.jar:/usr/bin/../share/java/kafka/avro-1.7.7.jar:/usr/bin/../share/java/kafka/jersey-guava-2.22.2.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.21.jar:/usr/bin/../share/java/kafka/commons-lang3-3.1.jar:/usr/bin/../share/java/kafka/jetty-security-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/guava-18.0.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.2.15.v20160210.jar:/usr/bin/../share/java/kafka/support-metrics-common-3.1.1.jar:/usr/bin/../share/java/confluent-support-metrics/*:/usr/share/java/confluent-support-metrics/* (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,815] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:os.version=4.9.13-moby (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,816] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,817] INFO Initiating client connection, connectString=zookeeper:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient@6eda5c9 (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2017-04-29 17:51:31,855] INFO Waiting for keeper state SyncConnected (org.I0Itec.zkclient.ZkClient)
kafka_1 | [2017-04-29 17:51:31,863] INFO Opening socket connection to server simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
kafka_1 | [2017-04-29 17:51:31,960] INFO Socket connection established to simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181, initiating session (org.apache.zookeeper.ClientCnxn)
kafka_1 | [2017-04-29 17:51:31,975] INFO Session establishment complete on server simplekafkaonyxcommander_zookeeper_1.simplekafkaonyxcommander_default/172.19.0.2:2181, sessionid = 0x15bbad6c6310001, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
kafka_1 | [2017-04-29 17:51:31,977] INFO zookeeper state changed (SyncConnected) (org.I0Itec.zkclient.ZkClient)
kafka_1 | [2017-04-29 17:51:32,271] INFO Cluster ID = BdQMdbNeRLKV5M4JgwzPhQ (kafka.server.KafkaServer)
kafka_1 | [2017-04-29 17:51:32,349] INFO Loading logs. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:51:32,357] INFO Logs loading complete in 8 ms. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:51:32,427] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:51:32,430] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:51:32,433] INFO Starting the log cleaner (kafka.log.LogCleaner)
kafka_1 | [2017-04-29 17:51:32,434] INFO [kafka-log-cleaner-thread-0], Starting (kafka.log.LogCleaner)
kafka_1 | [2017-04-29 17:51:32,436] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
kafka_1 | [2017-04-29 17:51:32,520] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
kafka_1 | [2017-04-29 17:51:32,525] INFO [Socket Server on Broker 0], Started 1 acceptor threads (kafka.network.SocketServer)
kafka_1 | [2017-04-29 17:51:32,551] INFO [ExpirationReaper-0], Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2017-04-29 17:51:32,552] INFO [ExpirationReaper-0], Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2017-04-29 17:51:32,596] INFO [Controller 0]: Controller starting up (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,604] INFO Creating /controller (is it secure? false) (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2017-04-29 17:51:32,619] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2017-04-29 17:51:32,620] INFO 0 successfully elected as leader (kafka.server.ZookeeperLeaderElector)
kafka_1 | [2017-04-29 17:51:32,621] INFO [Controller 0]: Broker 0 starting become controller state transition (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,657] INFO [Controller 0]: Controller 0 incremented epoch to 1 (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,658] DEBUG [Controller 0]: Registering IsrChangeNotificationListener (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,690] INFO [Controller 0]: Partitions undergoing preferred replica election: (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,691] INFO [Controller 0]: Partitions that completed preferred replica election: (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,693] INFO [Controller 0]: Resuming preferred replica election for partitions: (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,696] INFO [Controller 0]: Partitions being reassigned: Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,697] INFO [Controller 0]: Partitions already reassigned: Set() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,700] INFO [Controller 0]: Resuming reassignment of partitions: Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,705] INFO [Controller 0]: List of topics to be deleted: (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,706] INFO [Controller 0]: List of topics ineligible for deletion: (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,710] INFO [Controller 0]: Currently active brokers in the cluster: Set() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,711] INFO [Controller 0]: Currently shutting brokers in the cluster: Set() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,712] INFO [Controller 0]: Current list of topics in the cluster: Set() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,715] INFO [Replica state machine on controller 0]: Started replica state machine with initial state -> Map() (kafka.controller.ReplicaStateMachine)
kafka_1 | [2017-04-29 17:51:32,725] INFO [Partition state machine on Controller 0]: Started partition state machine with initial state -> Map() (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:51:32,727] INFO [Controller 0]: Broker 0 is ready to serve as the new controller with epoch 1 (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,729] INFO [Controller 0]: Starting preferred replica leader election for partitions (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,730] INFO [Partition state machine on Controller 0]: Invoking state change to OnlinePartition for partitions (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:51:32,755] INFO [Controller 0]: starting the partition rebalance scheduler (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,758] INFO [Controller 0]: Controller startup complete (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:32,772] INFO [ExpirationReaper-0], Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2017-04-29 17:51:32,798] INFO [ExpirationReaper-0], Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2017-04-29 17:51:32,800] INFO [ExpirationReaper-0], Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2017-04-29 17:51:32,829] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.GroupCoordinator)
kafka_1 | [2017-04-29 17:51:32,831] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.GroupCoordinator)
kafka_1 | [2017-04-29 17:51:32,836] INFO [Group Metadata Manager on Broker 0]: Removed 0 expired offsets in 4 milliseconds. (kafka.coordinator.GroupMetadataManager)
kafka_1 | [2017-04-29 17:51:32,876] INFO Will not load MX4J, mx4j-tools.jar is not in the classpath (kafka.utils.Mx4jLoader$)
kafka_1 | [2017-04-29 17:51:32,918] INFO New leader is 0 (kafka.server.ZookeeperLeaderElector$LeaderChangeListener)
kafka_1 | [2017-04-29 17:51:32,931] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2017-04-29 17:51:32,946] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2017-04-29 17:51:32,949] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka,9092,PLAINTEXT) (kafka.utils.ZkUtils)
kafka_1 | [2017-04-29 17:51:32,951] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
kafka_1 | [2017-04-29 17:51:32,953] INFO [BrokerChangeListener on Controller 0]: Broker change listener fired for path /brokers/ids with children 0 (kafka.controller.ReplicaStateMachine$BrokerChangeListener)
kafka_1 | [2017-04-29 17:51:33,006] INFO Kafka version : 0.10.1.0-cp2 (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2017-04-29 17:51:33,006] INFO Kafka commitId : beb290796c342e22 (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2017-04-29 17:51:33,010] INFO [Kafka Server 0], started (kafka.server.KafkaServer)
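Because the broker registered itself in ZooKeeper as PLAINTEXT://kafka:9092, only clients that can resolve the hostname kafka, i.e. containers on the same Docker network, can use that bootstrap address. A hedged example of producing to it from another container on that network, assuming the Confluent console tools are on the PATH:

    # Run from a container attached to the same network, where "kafka" resolves to this broker.
    kafka-console-producer --broker-list kafka:9092 --topic scanner-command
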
kafka_1 | [2017-04-29 17:51:33,032] INFO [BrokerChangeListener on Controller 0]: Newly added brokers: 0, deleted brokers: , all live brokers: 0 (kafka.controller.ReplicaStateMachine$BrokerChangeListener)
kafka_1 | [2017-04-29 17:51:33,036] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager)
kafka_1 | [2017-04-29 17:51:33,051] INFO [Controller-0-to-broker-0-send-thread], Starting (kafka.controller.RequestSendThread)
kafka_1 | [2017-04-29 17:51:33,052] INFO [Controller 0]: New broker startup callback for 0 (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:33,102] INFO [Controller-0-to-broker-0-send-thread], Controller 0 connected to kafka:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread)
kafka_1 | [2017-04-29 17:51:33,161] TRACE Controller 0 epoch 1 received response {error_code=0} for a request sent to broker kafka:9092 (id: 0 rack: null) (state.change.logger)
kafka_1 | [2017-04-29 17:51:37,759] TRACE [Controller 0]: checking need to trigger partition rebalance (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:51:37,767] DEBUG [Controller 0]: preferred replicas by broker Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:56:37,758] TRACE [Controller 0]: checking need to trigger partition rebalance (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:56:37,759] DEBUG [Controller 0]: preferred replicas by broker Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:59:17,307] DEBUG [TopicChangeListener on Controller 0]: Topic change listener fired for path /brokers/topics with children scanner-command (kafka.controller.PartitionStateMachine$TopicChangeListener)
kafka_1 | [2017-04-29 17:59:17,325] INFO [TopicChangeListener on Controller 0]: New topics: [Set(scanner-command)], deleted topics: [Set()], new partition replica assignment [Map([scanner-command,3] -> List(0), [scanner-command,7] -> List(0), [scanner-command,1] -> List(0), [scanner-command,6] -> List(0), [scanner-command,0] -> List(0), [scanner-command,4] -> List(0), [scanner-command,9] -> List(0), [scanner-command,8] -> List(0), [scanner-command,5] -> List(0), [scanner-command,2] -> List(0))] (kafka.controller.PartitionStateMachine$TopicChangeListener)
kafka_1 | [2017-04-29 17:59:17,327] INFO [Controller 0]: New topic creation callback for [scanner-command,0],[scanner-command,6],[scanner-command,5],[scanner-command,7],[scanner-command,9],[scanner-command,8],[scanner-command,2],[scanner-command,4],[scanner-command,1],[scanner-command,3] (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:59:17,331] INFO [Controller 0]: New partition creation callback for [scanner-command,0],[scanner-command,6],[scanner-command,5],[scanner-command,7],[scanner-command,9],[scanner-command,8],[scanner-command,2],[scanner-command,4],[scanner-command,1],[scanner-command,3] (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:59:17,333] INFO [Partition state machine on Controller 0]: Invoking state change to NewPartition for partitions [scanner-command,0],[scanner-command,6],[scanner-command,5],[scanner-command,7],[scanner-command,9],[scanner-command,8],[scanner-command,2],[scanner-command,4],[scanner-command,1],[scanner-command,3] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,336] TRACE Controller 0 epoch 1 changed partition [scanner-command,0] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,337] TRACE Controller 0 epoch 1 changed partition [scanner-command,6] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,337] TRACE Controller 0 epoch 1 changed partition [scanner-command,5] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,338] TRACE Controller 0 epoch 1 changed partition [scanner-command,7] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,338] TRACE Controller 0 epoch 1 changed partition [scanner-command,9] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,338] TRACE Controller 0 epoch 1 changed partition [scanner-command,8] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,339] TRACE Controller 0 epoch 1 changed partition [scanner-command,2] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,339] TRACE Controller 0 epoch 1 changed partition [scanner-command,4] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,340] TRACE Controller 0 epoch 1 changed partition [scanner-command,1] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,340] TRACE Controller 0 epoch 1 changed partition [scanner-command,3] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,344] INFO [Replica state machine on controller 0]: Invoking state change to NewReplica for replicas [Topic=scanner-command,Partition=6,Replica=0],[Topic=scanner-command,Partition=9,Replica=0],[Topic=scanner-command,Partition=3,Replica=0],[Topic=scanner-command,Partition=0,Replica=0],[Topic=scanner-command,Partition=5,Replica=0],[Topic=scanner-command,Partition=7,Replica=0],[Topic=scanner-command,Partition=8,Replica=0],[Topic=scanner-command,Partition=2,Replica=0],[Topic=scanner-command,Partition=4,Replica=0],[Topic=scanner-command,Partition=1,Replica=0] (kafka.controller.ReplicaStateMachine)
kafka_1 | [2017-04-29 17:59:17,354] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,6] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,359] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,9] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,365] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,3] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,373] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,0] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,374] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,5] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,375] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,7] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,376] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,8] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,380] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,2] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,382] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,4] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,384] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,1] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,384] INFO [Partition state machine on Controller 0]: Invoking state change to OnlinePartition for partitions [scanner-command,0],[scanner-command,6],[scanner-command,5],[scanner-command,7],[scanner-command,9],[scanner-command,8],[scanner-command,2],[scanner-command,4],[scanner-command,1],[scanner-command,3] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,386] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,0] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,389] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,0] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,427] TRACE Controller 0 epoch 1 changed partition [scanner-command,0] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,427] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,6] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,428] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,6] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,456] TRACE Controller 0 epoch 1 changed partition [scanner-command,6] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,457] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,5] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,457] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,5] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,472] TRACE Controller 0 epoch 1 changed partition [scanner-command,5] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,472] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,7] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,473] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,7] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,488] TRACE Controller 0 epoch 1 changed partition [scanner-command,7] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,488] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,9] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,488] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,9] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,503] TRACE Controller 0 epoch 1 changed partition [scanner-command,9] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,503] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,8] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,503] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,8] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,519] TRACE Controller 0 epoch 1 changed partition [scanner-command,8] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,519] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,2] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,519] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,2] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,536] TRACE Controller 0 epoch 1 changed partition [scanner-command,2] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,536] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,4] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,537] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,4] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,552] TRACE Controller 0 epoch 1 changed partition [scanner-command,4] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,553] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,1] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,553] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,1] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,569] TRACE Controller 0 epoch 1 changed partition [scanner-command,1] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,569] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner-command,3] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,570] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner-command,3] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,585] TRACE Controller 0 epoch 1 changed partition [scanner-command,3] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,587] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,587] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,588] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,588] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,588] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,589] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,589] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,589] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,590] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,590] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner-command,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-6 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-3 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-8 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-5 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-7 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,596] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-1 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,597] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,597] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-command-9 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,598] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,599] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,599] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,600] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 1 from controller 0 epoch 1 for partition [scanner-command,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,611] INFO [Replica state machine on controller 0]: Invoking state change to OnlineReplica for replicas [Topic=scanner-command,Partition=6,Replica=0],[Topic=scanner-command,Partition=9,Replica=0],[Topic=scanner-command,Partition=3,Replica=0],[Topic=scanner-command,Partition=0,Replica=0],[Topic=scanner-command,Partition=5,Replica=0],[Topic=scanner-command,Partition=7,Replica=0],[Topic=scanner-command,Partition=8,Replica=0],[Topic=scanner-command,Partition=2,Replica=0],[Topic=scanner-command,Partition=4,Replica=0],[Topic=scanner-command,Partition=1,Replica=0] (kafka.controller.ReplicaStateMachine)
kafka_1 | [2017-04-29 17:59:17,613] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,6] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,613] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,9] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,613] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,3] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,614] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,0] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,614] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,5] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,614] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,7] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,614] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,8] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,614] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,2] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,615] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,4] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,615] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner-command,1] from NewReplica to OnlineReplica (state.change.logger)
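At this point the scanner-command topic is fully online: 10 partitions, each led by broker 0 with a replication factor of 1. Since num.partitions is 1 in the config above, the topic was presumably created explicitly rather than auto-created. A hedged way to confirm the assignment from inside the kafka container:

    # Should list each scanner-command partition with Leader: 0, Replicas: 0, Isr: 0,
    # matching the controller state changes above.
    kafka-topics --zookeeper zookeeper:2181 --describe --topic scanner-command
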
kafka_1 | [2017-04-29 17:59:17,617] DEBUG [TopicChangeListener on Controller 0]: Topic change listener fired for path /brokers/topics with children scanner,scanner-command (kafka.controller.PartitionStateMachine$TopicChangeListener)
kafka_1 | [2017-04-29 17:59:17,623] INFO [TopicChangeListener on Controller 0]: New topics: [Set(scanner)], deleted topics: [Set()], new partition replica assignment [Map([scanner,5] -> List(0), [scanner,7] -> List(0), [scanner,0] -> List(0), [scanner,1] -> List(0), [scanner,9] -> List(0), [scanner,2] -> List(0), [scanner,6] -> List(0), [scanner,8] -> List(0), [scanner,3] -> List(0), [scanner,4] -> List(0))] (kafka.controller.PartitionStateMachine$TopicChangeListener)
kafka_1 | [2017-04-29 17:59:17,623] INFO [Controller 0]: New topic creation callback for [scanner,4],[scanner,1],[scanner,3],[scanner,5],[scanner,9],[scanner,2],[scanner,6],[scanner,8],[scanner,0],[scanner,7] (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 17:59:17,625] INFO [Controller 0]: New partition creation callback for [scanner,4],[scanner,1],[scanner,3],[scanner,5],[scanner,9],[scanner,2],[scanner,6],[scanner,8],[scanner,0],[scanner,7] (kafka.controller.KafkaController)
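(Aside, not part of the captured log: the controller above is reacting to the scanner topic appearing under /brokers/topics. Whether the topic was created explicitly or auto-created on first use, a minimal client-side sketch of the kind of call that brings such a topic into play looks like the following. The bootstrap address kafka:9092 and the topic name scanner come from these logs; the serializers and payload are illustrative assumptions, not taken from the gist.)

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ScannerProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Advertised listener as reported elsewhere in this log.
        props.put("bootstrap.servers", "kafka:9092");
        props.put("key.serializer",
                  "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
                  "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // The metadata request behind this send is enough to register the topic
            // when the broker auto-creates topics; otherwise it must already exist.
            producer.send(new ProducerRecord<>("scanner", "key-0", "hello"));
            producer.flush();
        }
    }
}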
kafka_1 | [2017-04-29 17:59:17,625] INFO [Partition state machine on Controller 0]: Invoking state change to NewPartition for partitions [scanner,4],[scanner,1],[scanner,3],[scanner,5],[scanner,9],[scanner,2],[scanner,6],[scanner,8],[scanner,0],[scanner,7] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,625] TRACE Controller 0 epoch 1 changed partition [scanner,4] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,625] TRACE Controller 0 epoch 1 changed partition [scanner,1] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,625] TRACE Controller 0 epoch 1 changed partition [scanner,3] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,5] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,9] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,2] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,6] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,8] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,0] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,626] TRACE Controller 0 epoch 1 changed partition [scanner,7] state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,627] INFO [Replica state machine on controller 0]: Invoking state change to NewReplica for replicas [Topic=scanner,Partition=7,Replica=0],[Topic=scanner,Partition=6,Replica=0],[Topic=scanner,Partition=3,Replica=0],[Topic=scanner,Partition=9,Replica=0],[Topic=scanner,Partition=2,Replica=0],[Topic=scanner,Partition=1,Replica=0],[Topic=scanner,Partition=4,Replica=0],[Topic=scanner,Partition=0,Replica=0],[Topic=scanner,Partition=5,Replica=0],[Topic=scanner,Partition=8,Replica=0] (kafka.controller.ReplicaStateMachine)
kafka_1 | [2017-04-29 17:59:17,628] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,628] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,7] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,632] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,633] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,633] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,633] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,6] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,634] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,634] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,634] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,3] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,635] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,635] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,636] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,9] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,636] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,637] TRACE Broker 0 handling LeaderAndIsr request correlationId 1 from controller 0 epoch 1 starting the become-leader transition for partition [scanner-command,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,638] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,2] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,639] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,1] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,640] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,4] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,641] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,0] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,642] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,5] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,642] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions scanner-command-5,scanner-command-1,scanner-command-2,scanner-command-8,scanner-command-9,scanner-command-4,scanner-command-6,scanner-command-7,scanner-command-3,scanner-command-0 (kafka.server.ReplicaFetcherManager)
kafka_1 | [2017-04-29 17:59:17,644] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,8] from NonExistentReplica to NewReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,644] INFO [Partition state machine on Controller 0]: Invoking state change to OnlinePartition for partitions [scanner,4],[scanner,1],[scanner,3],[scanner,5],[scanner,9],[scanner,2],[scanner,6],[scanner,8],[scanner,0],[scanner,7] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,645] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,4] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,645] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,4] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,681] TRACE Controller 0 epoch 1 changed partition [scanner,4] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,681] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,1] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,682] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,1] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,731] TRACE Controller 0 epoch 1 changed partition [scanner,1] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,731] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,3] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,732] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,3] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,751] TRACE Controller 0 epoch 1 changed partition [scanner,3] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,751] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,5] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,751] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,5] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,755] INFO Completed load of log scanner-command-6 with 1 log segments and log end offset 0 in 80 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,759] INFO Created log for partition [scanner-command,6] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,760] INFO Partition [scanner-command,6] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,6] (kafka.cluster.Partition)
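(Aside, not part of the captured log: the "Created log for partition ... with properties {...}" lines show the effective topic-level configuration, here all broker defaults. A hedged sketch of creating a comparable topic with explicit overrides via the AdminClient API follows; note this API belongs to newer kafka-clients releases than the broker in these logs, where the kafka-topics CLI would be the usual route. The partition count of 10 and replication factor of 1 mirror what the log reports; the specific overrides are illustrative.)

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateScannerCommandTopicSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafka:9092");

        // Topic-level overrides; values here echo two of the settings the log prints.
        Map<String, String> overrides = new HashMap<>();
        overrides.put("retention.ms", "604800000");   // 7 days
        overrides.put("cleanup.policy", "delete");

        try (AdminClient admin = AdminClient.create(props)) {
            NewTopic topic = new NewTopic("scanner-command", 10, (short) 1)
                    .configs(overrides);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}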
kafka_1 | [2017-04-29 17:59:17,769] TRACE Controller 0 epoch 1 changed partition [scanner,5] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,770] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,9] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,770] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,9] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,778] INFO Completed load of log scanner-command-3 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,779] INFO Created log for partition [scanner-command,3] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,779] INFO Partition [scanner-command,3] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,3] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,784] INFO Completed load of log scanner-command-0 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,786] INFO Created log for partition [scanner-command,0] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,786] INFO Partition [scanner-command,0] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,0] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,787] TRACE Controller 0 epoch 1 changed partition [scanner,9] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,787] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,2] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,787] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,2] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,793] INFO Completed load of log scanner-command-7 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,794] INFO Created log for partition [scanner-command,7] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,795] INFO Partition [scanner-command,7] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,7] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,801] INFO Completed load of log scanner-command-4 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,803] INFO Created log for partition [scanner-command,4] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,803] INFO Partition [scanner-command,4] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,4] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,805] TRACE Controller 0 epoch 1 changed partition [scanner,2] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,806] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,6] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,806] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,6] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,808] INFO Completed load of log scanner-command-1 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,809] INFO Created log for partition [scanner-command,1] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,810] INFO Partition [scanner-command,1] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,1] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,814] INFO Completed load of log scanner-command-8 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,815] INFO Created log for partition [scanner-command,8] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,815] INFO Partition [scanner-command,8] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,8] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,823] TRACE Controller 0 epoch 1 changed partition [scanner,6] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,827] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,8] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,827] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,8] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,827] INFO Completed load of log scanner-command-5 with 1 log segments and log end offset 0 in 9 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,829] INFO Created log for partition [scanner-command,5] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,829] INFO Partition [scanner-command,5] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,5] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,835] INFO Completed load of log scanner-command-2 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,837] INFO Created log for partition [scanner-command,2] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,837] INFO Partition [scanner-command,2] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,2] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,844] TRACE Controller 0 epoch 1 changed partition [scanner,8] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,845] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,0] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,845] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,0] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,847] INFO Completed load of log scanner-command-9 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,848] INFO Created log for partition [scanner-command,9] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,848] INFO Partition [scanner-command,9] on broker 0: No checkpointed highwatermark is found for partition [scanner-command,9] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,850] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,851] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,851] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,851] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,852] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,852] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,852] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,852] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,853] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,853] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 1 for partition [scanner-command,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,854] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,854] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,854] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,855] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,855] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,855] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,855] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,856] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,856] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,857] TRACE Broker 0 completed LeaderAndIsr request correlationId 1 from controller 0 epoch 1 for the become-leader transition for partition [scanner-command,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,863] TRACE Controller 0 epoch 1 received response {error_code=0,partitions=[{topic=scanner-command,partition=6,error_code=0},{topic=scanner-command,partition=3,error_code=0},{topic=scanner-command,partition=8,error_code=0},{topic=scanner-command,partition=0,error_code=0},{topic=scanner-command,partition=5,error_code=0},{topic=scanner-command,partition=2,error_code=0},{topic=scanner-command,partition=7,error_code=0},{topic=scanner-command,partition=1,error_code=0},{topic=scanner-command,partition=4,error_code=0},{topic=scanner-command,partition=9,error_code=0}]} for a request sent to broker kafka:9092 (id: 0 rack: null) (state.change.logger)
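(Aside, not part of the captured log: the response above carries error_code=0 for every scanner-command partition, so broker 0 now leads all ten. A minimal way to confirm that from a client is to ask for the partition metadata, as in this sketch; the group id and deserializers are illustrative assumptions.)

import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;

public class LeadershipCheckSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafka:9092");
        props.put("group.id", "leadership-check");
        props.put("key.deserializer",
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                  "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Each PartitionInfo should report broker 0 as leader after the
            // LeaderAndIsr round logged above.
            for (PartitionInfo p : consumer.partitionsFor("scanner-command")) {
                System.out.printf("partition %d leader %s%n", p.partition(), p.leader());
            }
        }
    }
}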
kafka_1 | [2017-04-29 17:59:17,865] TRACE Controller 0 epoch 1 changed partition [scanner,0] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,866] DEBUG [Partition state machine on Controller 0]: Live assigned replicas for partition [scanner,7] are: [List(0)] (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,866] DEBUG [Partition state machine on Controller 0]: Initializing leader and isr for partition [scanner,7] to (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.PartitionStateMachine)
kafka_1 | [2017-04-29 17:59:17,870] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-7 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,870] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-6 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,870] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-9 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,871] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-8 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,871] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-3 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,871] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,871] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-5 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,871] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-4 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,872] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,872] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-command-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,873] TRACE Controller 0 epoch 1 received response {error_code=0} for a request sent to broker kafka:9092 (id: 0 rack: null) (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,890] TRACE Controller 0 epoch 1 changed partition [scanner,7] from NewPartition to OnlinePartition with leader 0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,891] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,892] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,892] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,892] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,892] TRACE Controller 0 epoch 1 sending become-leader LeaderAndIsr request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition [scanner,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,893] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-5 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,893] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-2 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,893] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-7 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-1 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-9 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-6 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-3 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-8 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,894] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Controller 0 epoch 1 sending UpdateMetadata request (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) to broker 0 for partition scanner-0 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,895] TRACE Broker 0 received LeaderAndIsr request PartitionState(controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0]) correlation id 3 from controller 0 epoch 1 for partition [scanner,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,900] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,901] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,901] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,901] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,901] TRACE Broker 0 handling LeaderAndIsr request correlationId 3 from controller 0 epoch 1 starting the become-leader transition for partition [scanner,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,904] INFO [Replica state machine on controller 0]: Invoking state change to OnlineReplica for replicas [Topic=scanner,Partition=7,Replica=0],[Topic=scanner,Partition=6,Replica=0],[Topic=scanner,Partition=3,Replica=0],[Topic=scanner,Partition=9,Replica=0],[Topic=scanner,Partition=2,Replica=0],[Topic=scanner,Partition=1,Replica=0],[Topic=scanner,Partition=4,Replica=0],[Topic=scanner,Partition=0,Replica=0],[Topic=scanner,Partition=5,Replica=0],[Topic=scanner,Partition=8,Replica=0] (kafka.controller.ReplicaStateMachine)
kafka_1 | [2017-04-29 17:59:17,905] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,7] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,901] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions scanner-4,scanner-9,scanner-5,scanner-0,scanner-6,scanner-1,scanner-7,scanner-3,scanner-2,scanner-8 (kafka.server.ReplicaFetcherManager)
kafka_1 | [2017-04-29 17:59:17,906] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,6] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,907] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,3] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,907] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,9] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,908] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,2] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,908] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,1] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,908] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,4] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,909] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,0] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,909] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,5] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,909] TRACE Controller 0 epoch 1 changed state of replica 0 for partition [scanner,8] from NewReplica to OnlineReplica (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,911] INFO Completed load of log scanner-7 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,913] INFO Created log for partition [scanner,7] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,915] INFO Partition [scanner,7] on broker 0: No checkpointed highwatermark is found for partition [scanner,7] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,918] INFO Completed load of log scanner-4 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,920] INFO Created log for partition [scanner,4] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,921] INFO Partition [scanner,4] on broker 0: No checkpointed highwatermark is found for partition [scanner,4] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,923] INFO Completed load of log scanner-1 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,924] INFO Created log for partition [scanner,1] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,925] INFO Partition [scanner,1] on broker 0: No checkpointed highwatermark is found for partition [scanner,1] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,928] INFO Completed load of log scanner-8 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,929] INFO Created log for partition [scanner,8] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,930] INFO Partition [scanner,8] on broker 0: No checkpointed highwatermark is found for partition [scanner,8] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,933] INFO Completed load of log scanner-5 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,935] INFO Created log for partition [scanner,5] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,936] INFO Partition [scanner,5] on broker 0: No checkpointed highwatermark is found for partition [scanner,5] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,939] INFO Completed load of log scanner-2 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,940] INFO Created log for partition [scanner,2] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,940] INFO Partition [scanner,2] on broker 0: No checkpointed highwatermark is found for partition [scanner,2] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,943] INFO Completed load of log scanner-9 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,944] INFO Created log for partition [scanner,9] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,944] INFO Partition [scanner,9] on broker 0: No checkpointed highwatermark is found for partition [scanner,9] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,947] INFO Completed load of log scanner-6 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,948] INFO Created log for partition [scanner,6] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,948] INFO Partition [scanner,6] on broker 0: No checkpointed highwatermark is found for partition [scanner,6] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,951] INFO Completed load of log scanner-3 with 1 log segments and log end offset 0 in 1 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,952] INFO Created log for partition [scanner,3] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,952] INFO Partition [scanner,3] on broker 0: No checkpointed highwatermark is found for partition [scanner,3] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,955] INFO Completed load of log scanner-0 with 1 log segments and log end offset 0 in 0 ms (kafka.log.Log)
kafka_1 | [2017-04-29 17:59:17,956] INFO Created log for partition [scanner,0] in /var/lib/kafka/data with properties {compression.type -> producer, message.format.version -> 0.10.1-IV2, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> true, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
kafka_1 | [2017-04-29 17:59:17,957] INFO Partition [scanner,0] on broker 0: No checkpointed highwatermark is found for partition [scanner,0] (kafka.cluster.Partition)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,957] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 stopped fetchers as part of become-leader request from controller 0 epoch 1 with correlation id 3 for partition [scanner,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,7] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,4] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,1] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,958] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,8] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,5] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,2] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,9] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,6] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,3] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,959] TRACE Broker 0 completed LeaderAndIsr request correlationId 3 from controller 0 epoch 1 for the become-leader transition for partition [scanner,0] (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,960] TRACE Controller 0 epoch 1 received response {error_code=0,partitions=[{topic=scanner,partition=5,error_code=0},{topic=scanner,partition=2,error_code=0},{topic=scanner,partition=7,error_code=0},{topic=scanner,partition=1,error_code=0},{topic=scanner,partition=4,error_code=0},{topic=scanner,partition=9,error_code=0},{topic=scanner,partition=6,error_code=0},{topic=scanner,partition=3,error_code=0},{topic=scanner,partition=8,error_code=0},{topic=scanner,partition=0,error_code=0}]} for a request sent to broker kafka:9092 (id: 0 rack: null) (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,962] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-9 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,962] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-8 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,962] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,962] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,963] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-3 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,963] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,963] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-5 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,963] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-4 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,964] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-7 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,964] TRACE Broker 0 cached leader info (LeaderAndIsrInfo:(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),ReplicationFactor:1),AllReplicas:0) for partition scanner-6 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
kafka_1 | [2017-04-29 17:59:17,965] TRACE Controller 0 epoch 1 received response {error_code=0} for a request sent to broker kafka:9092 (id: 0 rack: null) (state.change.logger)
kafka_1 | [2017-04-29 18:01:32,831] INFO [Group Metadata Manager on Broker 0]: Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.GroupMetadataManager)
kafka_1 | [2017-04-29 18:01:37,759] TRACE [Controller 0]: checking need to trigger partition rebalance (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:01:37,760] DEBUG [Controller 0]: preferred replicas by broker Map(0 -> Map([scanner,5] -> List(0), [scanner,7] -> List(0), [scanner-command,3] -> List(0), [scanner,0] -> List(0), [scanner,1] -> List(0), [scanner,9] -> List(0), [scanner-command,7] -> List(0), [scanner-command,6] -> List(0), [scanner-command,1] -> List(0), [scanner-command,4] -> List(0), [scanner,2] -> List(0), [scanner-command,0] -> List(0), [scanner-command,8] -> List(0), [scanner-command,9] -> List(0), [scanner,6] -> List(0), [scanner,8] -> List(0), [scanner-command,5] -> List(0), [scanner,4] -> List(0), [scanner-command,2] -> List(0), [scanner,3] -> List(0))) (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:01:37,763] DEBUG [Controller 0]: topics not in preferred replica Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:01:37,765] TRACE [Controller 0]: leader imbalance ratio for broker 0 is 0.000000 (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:06:37,759] TRACE [Controller 0]: checking need to trigger partition rebalance (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:06:37,760] DEBUG [Controller 0]: preferred replicas by broker Map(0 -> Map([scanner,5] -> List(0), [scanner,7] -> List(0), [scanner-command,3] -> List(0), [scanner,0] -> List(0), [scanner,1] -> List(0), [scanner,9] -> List(0), [scanner-command,7] -> List(0), [scanner-command,6] -> List(0), [scanner-command,1] -> List(0), [scanner-command,4] -> List(0), [scanner,2] -> List(0), [scanner-command,0] -> List(0), [scanner-command,8] -> List(0), [scanner-command,9] -> List(0), [scanner,6] -> List(0), [scanner,8] -> List(0), [scanner-command,5] -> List(0), [scanner,4] -> List(0), [scanner-command,2] -> List(0), [scanner,3] -> List(0))) (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:06:37,761] DEBUG [Controller 0]: topics not in preferred replica Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:06:37,761] TRACE [Controller 0]: leader imbalance ratio for broker 0 is 0.000000 (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:11:32,831] INFO [Group Metadata Manager on Broker 0]: Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.GroupMetadataManager)
kafka_1 | [2017-04-29 18:11:37,758] TRACE [Controller 0]: checking need to trigger partition rebalance (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:11:37,760] DEBUG [Controller 0]: preferred replicas by broker Map(0 -> Map([scanner,5] -> List(0), [scanner,7] -> List(0), [scanner-command,3] -> List(0), [scanner,0] -> List(0), [scanner,1] -> List(0), [scanner,9] -> List(0), [scanner-command,7] -> List(0), [scanner-command,6] -> List(0), [scanner-command,1] -> List(0), [scanner-command,4] -> List(0), [scanner,2] -> List(0), [scanner-command,0] -> List(0), [scanner-command,8] -> List(0), [scanner-command,9] -> List(0), [scanner,6] -> List(0), [scanner,8] -> List(0), [scanner-command,5] -> List(0), [scanner,4] -> List(0), [scanner-command,2] -> List(0), [scanner,3] -> List(0))) (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:11:37,760] DEBUG [Controller 0]: topics not in preferred replica Map() (kafka.controller.KafkaController)
kafka_1 | [2017-04-29 18:11:37,761] TRACE [Controller 0]: leader imbalance ratio for broker 0 is 0.000000 (kafka.controller.KafkaController)