@atharvai
Created March 6, 2017 09:57
kafka-connect S3 sink NullPointerException log
[2017-03-03 21:03:13,774] INFO Connector S3SinkEventsCommunityVideoShare config updated (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,463] INFO Rebalance started (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,463] INFO Finished stopping tasks in preparation for rebalance (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,463] INFO (Re-)joining group kafka-connect (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:14,464] INFO 172.17.0.1 - - [03/Mar/2017:21:03:13 +0000] "POST /connectors HTTP/1.1" 201 850 706 (org.apache.kafka.connect.runtime.rest.RestServer)
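For reference, that 201 is the REST call registering the connector. Below is a hypothetical reconstruction of an equivalent request in Java, assembled from the config dumps later in this log; tasks.max, flush.size, partition.duration.ms, s3.region and the other numeric values are placeholders, since the real numbers were stripped from this paste.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Hypothetical reconstruction, not the original request body: every numeric
// value below is a placeholder because the real ones were lost from this paste.
public class CreateS3SinkConnector {
    public static void main(String[] args) throws Exception {
        String body = "{"
            + "\"name\": \"S3SinkEventsCommunityVideoShare\","
            + "\"config\": {"
            + "\"connector.class\": \"io.confluent.connect.s3.S3SinkConnector\","
            + "\"tasks.max\": \"1\","                   // placeholder
            + "\"topics\": \"event_community_video_share\","
            + "\"format.class\": \"io.confluent.connect.s3.format.json.JsonFormat\","
            + "\"storage.class\": \"io.confluent.connect.s3.storage.S3Storage\","
            + "\"s3.bucket.name\": \"my-bucket\","
            + "\"s3.region\": \"us-east-1\","           // placeholder
            + "\"flush.size\": \"1000\","               // placeholder
            + "\"partitioner.class\": \"io.confluent.connect.storage.partitioner.TimeBasedPartitioner\","
            + "\"partition.duration.ms\": \"3600000\"," // placeholder
            + "\"path.format\": \"'distributary'/'events'/'event_community_video_share'/YYYY/MM/DD/HH/\","
            + "\"locale\": \"en\","
            + "\"timezone\": \"UTC\""
            + "}}";

        // leader:28082 is the worker REST endpoint reported in this log.
        HttpURLConnection conn = (HttpURLConnection)
                new URL("http://leader:28082/connectors").openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode()); // log shows 201
    }
}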
[2017-03-03 21:03:14,465] INFO Joined group and got assignment: Assignment{error=0, leader='connect-1-1f5856ec-ee7c-4d28-ba2c-cc30ffea582b', leaderUrl='http://leader:28082/', offset=15, connectorIds=[S3SinkEventsCommunityVideoShare], taskIds=[S3SinkEventsCommunityVideoShare-0]} (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,465] INFO Starting connectors and tasks using config offset 15 (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,464] INFO Successfully joined group kafka-connect with generation 26 (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:14,465] INFO ConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.ConnectorConfig)
[2017-03-03 21:03:14,465] INFO Creating connector S3SinkEventsCommunityVideoShare of type io.confluent.connect.s3.S3SinkConnector (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:14,465] INFO Instantiated connector S3SinkEventsCommunityVideoShare with version 3.2.0 of type class io.confluent.connect.s3.S3SinkConnector (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:14,465] INFO S3SinkConnectorConfig values:
filename.offset.zero.pad.width =
flush.size =
format.class = class io.confluent.connect.s3.format.json.JsonFormat
retry.backoff.ms =
rotate.interval.ms = -
rotate.schedule.interval.ms = -
s3.bucket.name = my-bucket
s3.credentials.provider.class = class com.amazonaws.auth.DefaultAWSCredentialsProviderChain
s3.part.size =
s3.region = us-east-
s3.ssea.name =
s3.wan.mode = false
schema.cache.size =
shutdown.timeout.ms =
(io.confluent.connect.s3.S3SinkConnectorConfig)
[2017-03-03 21:03:14,465] INFO StorageCommonConfig values:
directory.delim = /
file.delim = +
storage.class = class io.confluent.connect.s3.storage.S3Storage
store.url = null
topics.dir = topics
(io.confluent.connect.storage.common.StorageCommonConfig)
[2017-03-03 21:03:14,465] INFO HiveConfig values:
hive.conf.dir =
hive.database = default
hive.home =
hive.integration = false
hive.metastore.uris =
schema.compatibility = NONE
(io.confluent.connect.storage.hive.HiveConfig)
[2017-03-03 21:03:14,465] INFO Starting connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:14,466] INFO PartitionerConfig values:
locale = en
partition.duration.ms =
partition.field.name =
partitioner.class = class io.confluent.connect.storage.partitioner.TimeBasedPartitioner
path.format = 'distributary'/'events'/'event_community_video_share'/YYYY/MM/DD/HH/
schema.generator.class = class io.confluent.connect.storage.hive.schema.TimeBasedSchemaGenerator
timezone = UTC
(io.confluent.connect.storage.partitioner.PartitionerConfig)
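An aside on the path.format just dumped: assuming the TimeBasedPartitioner renders it with a Joda-time pattern (as the storage partitioners of this era appear to), DD is day-of-year rather than day-of-month, so this layout yields prefixes like 2017/03/62/21 instead of 2017/03/03/21; a calendar-day layout would use dd. A minimal sketch, assuming joda-time on the classpath (it ships with the connector):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;

// Sketch: render the path.format from the PartitionerConfig dump above the
// way a Joda-based time partitioner would, for a record timestamped at the
// time of this log. Note DD (day-of-year) prints March 3 as 62.
public class PathFormatSketch {
    public static void main(String[] args) {
        String pathFormat =
            "'distributary'/'events'/'event_community_video_share'/YYYY/MM/DD/HH/";
        DateTime ts = new DateTime(2017, 3, 3, 21, 3, DateTimeZone.UTC);
        // Prints: distributary/events/event_community_video_share/2017/03/62/21/
        System.out.println(DateTimeFormat.forPattern(pathFormat).print(ts));
    }
}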
[2017-03-03 21:03:14,466] INFO Starting S3 connector S3SinkEventsCommunityVideoShare (io.confluent.connect.s3.S3SinkConnector)
[2017-03-03 21:03:14,466] INFO Finished creating connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:14,466] INFO SourceConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.SourceConnectorConfig)
[2017-03-03 21:03:15,585] INFO Tasks [S3SinkEventsCommunityVideoShare-0] configs updated (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,216] INFO Starting task S3SinkEventsCommunityVideoShare-0 (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,216] INFO Creating task S3SinkEventsCommunityVideoShare-0 (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,216] INFO ConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.ConnectorConfig)
[2017-03-03 21:03:16,216] INFO TaskConfig values:
task.class = class io.confluent.connect.s3.S3SinkTask
(org.apache.kafka.connect.runtime.TaskConfig)
[2017-03-03 21:03:16,216] INFO Instantiated task S3SinkEventsCommunityVideoShare-0 with version 3.2.0 of type io.confluent.connect.s3.S3SinkTask (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,216] INFO ConsumerConfig values:
auto.commit.interval.ms =
auto.offset.reset = earliest
bootstrap.servers = [broker1:9092, broker2:9092, broker3:9092]
check.crcs = true
client.id =
connections.max.idle.ms =
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes =
fetch.max.wait.ms =
fetch.min.bytes =
group.id = connect-S3SinkEventsCommunityVideoShare
heartbeat.interval.ms =
interceptor.classes = null
key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
max.partition.fetch.bytes =
max.poll.interval.ms =
max.poll.records =
metadata.max.age.ms =
metric.reporters = []
metrics.num.samples =
metrics.sample.window.ms =
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes =
reconnect.backoff.ms =
request.timeout.ms =
retry.backoff.ms =
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin =
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.
sasl.kerberos.ticket.renew.window.factor = 0.
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes =
session.timeout.ms =
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
(org.apache.kafka.clients.consumer.ConsumerConfig)
[2017-03-03 21:03:16,217] INFO ConsumerConfig values:
auto.commit.interval.ms =
auto.offset.reset = earliest
bootstrap.servers = [broker1:9092, broker2:9092, broker3:9092]
check.crcs = true
client.id = consumer-
connections.max.idle.ms =
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes =
fetch.max.wait.ms =
fetch.min.bytes =
group.id = connect-S3SinkEventsCommunityVideoShare
heartbeat.interval.ms =
interceptor.classes = null
key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
max.partition.fetch.bytes =
max.poll.interval.ms =
max.poll.records =
metadata.max.age.ms =
metric.reporters = []
metrics.num.samples =
metrics.sample.window.ms =
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes =
reconnect.backoff.ms =
request.timeout.ms =
retry.backoff.ms =
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin =
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.
sasl.kerberos.ticket.renew.window.factor = 0.
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes =
session.timeout.ms =
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
(org.apache.kafka.clients.consumer.ConsumerConfig)
[2017-03-03 21:03:16,218] INFO Kafka version : 0.10.1.1-cp1 (org.apache.kafka.common.utils.AppInfoParser)
[2017-03-03 21:03:16,218] INFO Kafka commitId : 4548cdefdfc745ce (org.apache.kafka.common.utils.AppInfoParser)
[2017-03-03 21:03:16,218] INFO Finished starting connectors and tasks (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,218] INFO Rebalance started (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,218] INFO Stopping connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,218] INFO Shutting down S3 connector S3SinkEventsCommunityVideoShare (io.confluent.connect.s3.S3SinkConnector)
[2017-03-03 21:03:16,219] INFO Stopped connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,219] INFO Stopping task S3SinkEventsCommunityVideoShare-0 (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,241] INFO Finished stopping tasks in preparation for rebalance (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,241] INFO (Re-)joining group kafka-connect (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:16,272] INFO Joined group and got assignment: Assignment{error=0, leader='connect-1-1f5856ec-ee7c-4d28-ba2c-cc30ffea582b', leaderUrl='http://leader:28082/', offset=5, connectorIds=[S3SinkEventsCommunityVideoShare], taskIds=[S3SinkEventsCommunityVideoShare-0]} (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,272] INFO Starting connectors and tasks using config offset 5 (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,272] INFO Starting connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,273] INFO ConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.ConnectorConfig)
[2017-03-03 21:03:16,273] INFO Creating connector S3SinkEventsCommunityVideoShare of type io.confluent.connect.s3.S3SinkConnector (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,273] INFO Instantiated connector S3SinkEventsCommunityVideoShare with version 3.2.0 of type class io.confluent.connect.s3.S3SinkConnector (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,273] INFO S3SinkConnectorConfig values:
filename.offset.zero.pad.width =
flush.size =
format.class = class io.confluent.connect.s3.format.json.JsonFormat
retry.backoff.ms =
rotate.interval.ms = -
rotate.schedule.interval.ms = -
s3.bucket.name = my-bucket
s3.credentials.provider.class = class com.amazonaws.auth.DefaultAWSCredentialsProviderChain
s3.part.size =
s3.region = us-east-
s3.ssea.name =
s3.wan.mode = false
schema.cache.size =
shutdown.timeout.ms =
(io.confluent.connect.s3.S3SinkConnectorConfig)
[2017-03-03 21:03:16,273] INFO StorageCommonConfig values:
directory.delim = /
file.delim = +
storage.class = class io.confluent.connect.s3.storage.S3Storage
store.url = null
topics.dir = topics
(io.confluent.connect.storage.common.StorageCommonConfig)
[2017-03-03 21:03:16,273] INFO HiveConfig values:
hive.conf.dir =
hive.database = default
hive.home =
hive.integration = false
hive.metastore.uris =
schema.compatibility = NONE
(io.confluent.connect.storage.hive.HiveConfig)
[2017-03-03 21:03:16,273] INFO PartitionerConfig values:
locale = en
partition.duration.ms =
partition.field.name =
partitioner.class = class io.confluent.connect.storage.partitioner.TimeBasedPartitioner
path.format = 'distributary'/'events'/'event_community_video_share'/YYYY/MM/DD/HH/
schema.generator.class = class io.confluent.connect.storage.hive.schema.TimeBasedSchemaGenerator
timezone = UTC
(io.confluent.connect.storage.partitioner.PartitionerConfig)
[2017-03-03 21:03:16,272] INFO Successfully joined group kafka-connect with generation 27 (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:16,273] INFO Starting S3 connector S3SinkEventsCommunityVideoShare (io.confluent.connect.s3.S3SinkConnector)
[2017-03-03 21:03:16,274] INFO SourceConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.SourceConnectorConfig)
[2017-03-03 21:03:16,274] INFO Starting task S3SinkEventsCommunityVideoShare-0 (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,273] INFO Finished creating connector S3SinkEventsCommunityVideoShare (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,274] INFO Creating task S3SinkEventsCommunityVideoShare-0 (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,274] INFO ConnectorConfig values:
connector.class = io.confluent.connect.s3.S3SinkConnector
key.converter = null
name = S3SinkEventsCommunityVideoShare
tasks.max =
value.converter = null
(org.apache.kafka.connect.runtime.ConnectorConfig)
[2017-03-03 21:03:16,274] INFO TaskConfig values:
task.class = class io.confluent.connect.s3.S3SinkTask
(org.apache.kafka.connect.runtime.TaskConfig)
[2017-03-03 21:03:16,274] INFO Instantiated task S3SinkEventsCommunityVideoShare-0 with version 3.2.0 of type io.confluent.connect.s3.S3SinkTask (org.apache.kafka.connect.runtime.Worker)
[2017-03-03 21:03:16,274] INFO ConsumerConfig values:
auto.commit.interval.ms =
auto.offset.reset = earliest
bootstrap.servers = [broker1:9092, broker2:9092, broker3:9092]
check.crcs = true
client.id =
connections.max.idle.ms =
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes =
fetch.max.wait.ms =
fetch.min.bytes =
group.id = connect-S3SinkEventsCommunityVideoShare
heartbeat.interval.ms =
interceptor.classes = null
key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
max.partition.fetch.bytes =
max.poll.interval.ms =
max.poll.records =
metadata.max.age.ms =
metric.reporters = []
metrics.num.samples =
metrics.sample.window.ms =
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes =
reconnect.backoff.ms =
request.timeout.ms =
retry.backoff.ms =
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin =
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.
sasl.kerberos.ticket.renew.window.factor = 0.
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes =
session.timeout.ms =
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
(org.apache.kafka.clients.consumer.ConsumerConfig)
[2017-03-03 21:03:16,275] INFO ConsumerConfig values:
auto.commit.interval.ms =
auto.offset.reset = earliest
bootstrap.servers = [broker1:9092, broker2:9092, broker3:9092]
check.crcs = true
client.id = consumer-
connections.max.idle.ms =
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes =
fetch.max.wait.ms =
fetch.min.bytes =
group.id = connect-S3SinkEventsCommunityVideoShare
heartbeat.interval.ms =
interceptor.classes = null
key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
max.partition.fetch.bytes =
max.poll.interval.ms =
max.poll.records =
metadata.max.age.ms =
metric.reporters = []
metrics.num.samples =
metrics.sample.window.ms =
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes =
reconnect.backoff.ms =
request.timeout.ms =
retry.backoff.ms =
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin =
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.
sasl.kerberos.ticket.renew.window.factor = 0.
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes =
session.timeout.ms =
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
(org.apache.kafka.clients.consumer.ConsumerConfig)
[2017-03-03 21:03:16,276] INFO Kafka version : 0.10.1.1-cp1 (org.apache.kafka.common.utils.AppInfoParser)
[2017-03-03 21:03:16,276] INFO Kafka commitId : 4548cdefdfc745ce (org.apache.kafka.common.utils.AppInfoParser)
[2017-03-03 21:03:16,277] INFO Finished starting connectors and tasks (org.apache.kafka.connect.runtime.distributed.DistributedHerder)
[2017-03-03 21:03:16,277] INFO S3SinkConnectorConfig values:
filename.offset.zero.pad.width =
flush.size =
format.class = class io.confluent.connect.s3.format.json.JsonFormat
retry.backoff.ms =
rotate.interval.ms = -
rotate.schedule.interval.ms = -
s3.bucket.name = my-bucket
s3.credentials.provider.class = class com.amazonaws.auth.DefaultAWSCredentialsProviderChain
s3.part.size =
s3.region = us-east-
s3.ssea.name =
s3.wan.mode = false
schema.cache.size =
shutdown.timeout.ms =
(io.confluent.connect.s3.S3SinkConnectorConfig)
[2017-03-03 21:03:16,278] INFO StorageCommonConfig values:
directory.delim = /
file.delim = +
storage.class = class io.confluent.connect.s3.storage.S3Storage
store.url = null
topics.dir = topics
(io.confluent.connect.storage.common.StorageCommonConfig)
[2017-03-03 21:03:16,278] INFO HiveConfig values:
hive.conf.dir =
hive.database = default
hive.home =
hive.integration = false
hive.metastore.uris =
schema.compatibility = NONE
(io.confluent.connect.storage.hive.HiveConfig)
[2017-03-03 21:03:16,278] INFO PartitionerConfig values:
locale = en
partition.duration.ms =
partition.field.name =
partitioner.class = class io.confluent.connect.storage.partitioner.TimeBasedPartitioner
path.format = 'distributary'/'events'/'event_community_video_share'/YYYY/MM/DD/HH/
schema.generator.class = class io.confluent.connect.storage.hive.schema.TimeBasedSchemaGenerator
timezone = UTC
(io.confluent.connect.storage.partitioner.PartitionerConfig)
[2017-03-03 21:03:16,363] INFO Sink task WorkerSinkTask{id=S3SinkEventsCommunityVideoShare-0} finished initialization and start (org.apache.kafka.connect.runtime.WorkerSinkTask)
[2017-03-03 21:03:16,363] INFO Started S3 connector task with assigned partitions: [] (io.confluent.connect.s3.S3SinkTask)
[2017-03-03 21:03:16,464] INFO Discovered coordinator ip-172-31-45-102.ec2.internal:9092 (id: 2147482636 rack: null) for group connect-S3SinkEventsCommunityVideoShare. (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:16,477] INFO Revoking previously assigned partitions [] for group connect-S3SinkEventsCommunityVideoShare (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)
[2017-03-03 21:03:16,477] INFO (Re-)joining group connect-S3SinkEventsCommunityVideoShare (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:16,482] INFO Successfully joined group connect-S3SinkEventsCommunityVideoShare with generation 1 (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2017-03-03 21:03:16,483] INFO Setting newly assigned partitions [event_community_video_share-17, event_community_video_share-46, event_community_video_share-13, event_community_video_share-42, event_community_video_share-9, event_community_video_share-38, event_community_video_share-5, event_community_video_share-34, event_community_video_share-33, event_community_video_share-0, event_community_v
[2017-03-03 21:03:16,538] ERROR Task S3SinkEventsCommunityVideoShare-0 threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask)
java.lang.NullPointerException
at io.confluent.connect.s3.S3SinkTask.close(S3SinkTask.java:189)
at org.apache.kafka.connect.runtime.WorkerSinkTask.commitOffsets(WorkerSinkTask.java:302)
at org.apache.kafka.connect.runtime.WorkerSinkTask.closePartitions(WorkerSinkTask.java:435)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:147)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:140)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:175)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2017-03-03 21:03:16,538] ERROR Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask)
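Reading the trace: the second rebalance triggers WorkerSinkTask.closePartitions right after the partitions were assigned, and S3SinkTask.close in 3.2.0 appears to dereference a per-partition writer that open() never created, hence the NullPointerException at S3SinkTask.java:189. The sketch below illustrates that close-before-open failure mode with a null guard; the class and field names are illustrative, not the connector's actual source.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the apparent failure mode, not the real S3SinkTask
// source: close() is called for partitions that open() never saw, so the
// writer lookup returns null and calling through it would throw the
// NullPointerException reported above.
public class CloseBeforeOpenSketch {
    static class PartitionWriter {
        void close() { /* would flush buffered records and end the S3 upload */ }
    }

    // One writer per assigned topic-partition; populated only by open().
    private final Map<String, PartitionWriter> writers = new HashMap<>();

    public void close(Collection<String> partitions) {
        for (String tp : partitions) {
            PartitionWriter writer = writers.get(tp); // null if never opened
            if (writer == null) {
                continue; // guard that would avoid the NPE seen in the log
            }
            writer.close();
        }
        writers.clear();
    }

    public static void main(String[] args) {
        // Simulate the rebalance: close() without a prior open().
        new CloseBeforeOpenSketch()
            .close(Arrays.asList("event_community_video_share-17"));
        System.out.println("with the null guard, no NullPointerException");
    }
}

As the final ERROR line says, the task will not recover on its own; it has to be restarted manually once the underlying close-before-open path is addressed.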