Skip to content

Instantly share code, notes, and snippets.

chroot_env {
"/lib/x86_64-linux-gnu" = "/lib/x86_64-linux-gnu"
"/lib64" = "/lib64"
"/usr/lib" = "/usr/lib"
"/etc/ld.so.cache" = "/etc/ld.so.cache"
"/etc/ld.so.conf" = "/etc/ld.so.conf"
"/etc/ld.so.conf.d" = "/etc/ld.so.conf.d"
"/etc/resolvconf" = "/etc/resolvconf"
"/run/resolvconf/resolv.conf" = "/etc/resolv.conf"
Running on machine: mesos-slave0
[DIWEF]mmdd hh:mm:ss.uuuuuu pid file:line] msg
Command line: /usr/sbin/thermos_observer --root=/var/run/thermos --port=1338 --log_to_disk=DEBUG --log_to_stderr=google:INFO
I1020 20:33:40.410945 21698 static_assets.py:34] detecting assets...
I1020 20:33:40.412066 21698 static_assets.py:38] detected asset: observer.js
I1020 20:33:40.412518 21698 static_assets.py:38] detected asset: jquery.pailer.js
I1020 20:33:40.412724 21698 static_assets.py:38] detected asset: bootstrap.css
I1020 20:33:40.413183 21698 static_assets.py:38] detected asset: favicon.ico
I1020 20:33:40.413517 21698 static_assets.py:38] detected asset: jquery.js
com.twitter.common.util.StateMachine$IllegalStateTransitionException: State transition from CONSTRUCTED to STOPPED is not allowed.
at com.twitter.common.util.StateMachine.transition(StateMachine.java:183)
at org.apache.aurora.scheduler.storage.CallOrderEnforcingStorage.stop(CallOrderEnforcingStorage.java:109)
at org.apache.aurora.scheduler.SchedulerLifecycle$8.execute(SchedulerLifecycle.java:334)
at org.apache.aurora.scheduler.SchedulerLifecycle$8.execute(SchedulerLifecycle.java:308)
at com.twitter.common.base.Closures$4.execute(Closures.java:120)
at com.twitter.common.base.Closures$3.execute(Closures.java:98)
at com.twitter.common.util.StateMachine.transition(StateMachine.java:191)
at org.apache.aurora.scheduler.SchedulerLifecycle$4.execute(SchedulerLifecycle.java:234)
at com.twitter.common.application.ShutdownRegistry$ShutdownRegistryImpl.execute(ShutdownRegistry.java:88)
[2015-07-23 06:00:00,085] ERROR [KafkaApi-1] Error processing ProducerRequest with correlation id 1647844 from client samza_producer-analytics_2-1-1437158919583-2 on partition [input,3] (kafka.server.KafkaApis)
kafka.message.InvalidMessageException: Message found with corrupt size (0)
at kafka.message.ByteBufferMessageSet$$anon$1.makeNextOuter(ByteBufferMessageSet.scala:159)
at kafka.message.ByteBufferMessageSet$$anon$1.makeNext(ByteBufferMessageSet.scala:192)
at kafka.message.ByteBufferMessageSet$$anon$1.makeNext(ByteBufferMessageSet.scala:146)
at kafka.utils.IteratorTemplate.maybeComputeNext(IteratorTemplate.scala:66)
at kafka.utils.IteratorTemplate.hasNext(IteratorTemplate.scala:58)
at kafka.message.ByteBufferMessageSet$$anon$1.innerDone(ByteBufferMessageSet.scala:150)
at kafka.message.ByteBufferMessageSet$$anon$1.makeNext(ByteBufferMessageSet.scala:191)
at kafka.message.ByteBufferMessageSet$$anon$1.makeNext(ByteBufferMessageSet.scala:146)
@jshaw86
jshaw86 / gist:c618d89c5428aeaadf35
Created April 30, 2015 00:24
Samza Changelog Bug
2015-04-29 17:00:15 SamzaContainerExceptionHandler [ERROR] Uncaught exception in thread (name=main). Exiting process now.
org.apache.samza.system.kafka.KafkaSystemAdmin$KafkaChangelogException: Changelog topic validation failed for topic ACTIVE_KEYS-changelog4 because partition count 6 did not match expected partition count of 7
at org.apache.samza.system.kafka.KafkaSystemAdmin$$anonfun$validateTopicInKafka$2.apply(KafkaSystemAdmin.scala:327)
at org.apache.samza.system.kafka.KafkaSystemAdmin$$anonfun$validateTopicInKafka$2.apply(KafkaSystemAdmin.scala:319)
at org.apache.samza.util.ExponentialSleepStrategy.run(ExponentialSleepStrategy.scala:82)
at org.apache.samza.system.kafka.KafkaSystemAdmin.validateTopicInKafka(KafkaSystemAdmin.scala:318)
at org.apache.samza.system.kafka.KafkaSystemAdmin.createChangelogStream(KafkaSystemAdmin.scala:354)
at org.apache.samza.storage.TaskStorageManager$$anonfun$createStreams$3.apply(TaskStorageManager.scala:86)
at org.apache.samza.storage.TaskStorageManager$$anonfun$cre
@jshaw86
jshaw86 / gist:9c09a16112eee440f681
Created February 9, 2015 05:25
Perf Samza Task
(ns tasks.GenericTask
"
Generic Task that reads a DSL
"
(:import (org.apache.samza.task
StreamTask WindowableTask MessageCollector TaskCoordinator))
(:import (org.apache.samza.system
IncomingMessageEnvelope OutgoingMessageEnvelope SystemStream))
(:use [slingshot.slingshot :only [throw+ try+]])
(:require [tasks.mixins :as task])
@jshaw86
jshaw86 / gist:0bdd4d5bb1e233cd0b3f
Created February 9, 2015 05:15
Kafka Perf m3.2xlarge raid-0

m3.2xl Raid-0

Test            | Payload                 | Partitions | Uncompressed Rate (MB/s / msg/sec) | Snappy Compressed Rate (MB/s / msg/sec)
----------------|-------------------------|------------|------------------------------------|----------------------------------------
Producer 1t     | 1,000,000 @ 7.5kb / 200 | 1          | 57.61 / 8055                       | 135.83 / 18991
Producer 3t     | 1,000,000 @ 7.5kb / 200 | 1          | 109.34 / 15286                     | 253.80 / 35484
Consumer 1MB    | 1,000,000 @ 7.5kb       | 1          | 111.25 / 15554                     | 126.95 / 17749
Producer 1t     | 1,000,000 @ 7.5kb / 200 | 2          | 57.59 / 8052                       | 142.28 / 19892.58
Producer 3t     | 1,000,000 @ 7.5kb / 200 | 2          | 105.18 / 14705                     | 280.96 / 39281
Consumer 1MB    | 1,000,000 @ 7.5kb       | 2          | 112.07 / 15669                     | 347.02 / 48517
Consumer 1MB 2t | 1,000,000 @ 7.5kb       | 2          | no significant improvement         | no significant improvement

#
# Job and I/O
#
# Samza job configuration fragment.
# Job name; combined by Samza with job.id to identify this job instance.
job.name=generic
# StreamTask implementation to run for each input partition.
# NOTE(review): normally a fully-qualified class name — confirm GenericTask
# resolves on the job's classpath (the Clojure ns above is tasks.GenericTask).
task.class=GenericTask
# Input stream(s) consumed by the task, as <system>.<stream>: the "test"
# topic on the "kafka" system.
task.inputs=kafka.test
# Message serde for the "test" stream on the kafka system; "cljmsgpack" is
# presumably a custom serde registered elsewhere in this config — verify.
systems.kafka.streams.test.samza.msg.serde=cljmsgpack
class CometChatPubnub extends PubNub{
/**
 * Override of PubNub::history() that services a history request through
 * the subscribe channel instead, pinning the starting timetoken to 10000
 * (presumably to replay messages from the earliest point — confirm against
 * the PubNub SDK's timetoken semantics).
 *
 * @param array $args subscribe arguments; any caller-supplied 'timetoken'
 *                    is overwritten before delegation.
 */
function history($args){
// Work on a copy (PHP arrays copy on assignment) so the caller's array,
// if ever passed by reference, keeps its original timetoken.
$request = $args;
$request['timetoken'] = 10000;
$this->subscribe($request);
}
}
// Message handler for channel 1. Invoked per received message with the
// payload and its delivery envelope (presumably a PubNub subscribe
// callback — confirm against the subscribing code).
// NOTE(review): intentionally a stub; channel-1 handling goes here.
function chan1_callback(message,envelope){
/* do stuff for chan1 */
}
// Message handler for channel 2. Same shape as chan1_callback: receives
// the message payload and its delivery envelope (presumably a PubNub
// subscribe callback — confirm against the subscribing code).
// NOTE(review): intentionally a stub; channel-2 handling goes here.
function chan2_callback(message,envelope){
/* do stuff for chan2 */
}