@ankurcha
ankurcha / remove_db.js
Created April 25, 2014 19:19
This is a set of scripts we use to reliably drop sharded databases on a MongoDB cluster. The reason for doing this is because mongos is very unreliable when it comes to cleanly and reliably dropping stuff.
function removeDB(n) {
    flushConfigs();
    var colls = findChunkedCollections(n);
    colls.forEach(function(coll) {
        if(!db.getSisterDB(n).getCollection(coll).exists()) {
            print("collection did not exist for chunk: " + n + "." + coll)
            getPrimaries().forEach(function(p){
                p.getDB(n).createCollection(coll);
            });
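The preview above cuts off mid-function. As a rough illustration of how these helpers might be driven from a shell script against a mongos router (the host, script path, and database name below are placeholders, not values from the gist):

# Hypothetical invocation: load the helpers into the mongo shell on a mongos and drop one database
mongo --host mongos.example.internal:27017 admin \
    --eval 'load("remove_db.js"); removeDB("analytics_staging")'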
@ankurcha
ankurcha / api-agent.conf
Created June 19, 2014 09:30
logstash agent.conf
input {
  file {
    type => "analytics-api"
    path => "/var/log/analytics-api/analytics-api.log"
  }
  file {
    type => "mongos"
    path => ["/var/log/mongodb/mongos_rolling.log", "/var/log/mongodb/mongos_historical.log", "/var/log/mongodb/mongos_collector.log"]
  }
  file {
/*
var context = cubism.context(),
    cube = context.cube("https://data.brightcove.com/v1/accounts/8523")
cube.metric("video_view")
*/
cubism_contextPrototype.analytics_api = function(url) {
  if (!arguments.length) url = "";
  var source = {},
      context = this;
@ankurcha
ankurcha / knife_dl.sh
Created August 28, 2014 22:00
Function to download and extract cookbook
function knife_dl() {
  local packages="$@"
  cd ~/chef/cookbooks
  for c in ${packages}; do
    # download the cookbook tarball from the Chef community site, unpack it, then remove the archive
    knife cookbook site download ${c};
    tar zxf ${c}-*.tar.gz;
    rm ${c}-*.tar.gz;
  done
}
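A usage example for the function above (the cookbook names are placeholders):

# download and unpack a few community cookbooks into ~/chef/cookbooks
knife_dl apt build-essential java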
@ankurcha
ankurcha / java_installer.sh
Created September 24, 2014 19:16
Installer script for JDKs
#!/bin/bash
JDK_FILE="jdk-8u20-linux-x64.tar.gz"
JDK_URL="https://s3.amazonaws.com/com.brightcove.rna.repo.dev/${JDK_FILE}"
JDK_ARCHIVE="${HOME}/${JDK_FILE}"
JDKS_DIR="${HOME}/jdks"
JDK_DIR="${JDKS_DIR}/jdk1.8.0_20"
# ensure jdks directory exists
mkdir -p $JDKS_DIR
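The preview stops at the mkdir step. A minimal sketch of how the remaining steps might look, assuming the script goes on to download and unpack the archive (this continuation is a guess, not the original code):

# hypothetical continuation: fetch the archive if it is missing, then unpack it into ${JDKS_DIR}
[ -f "${JDK_ARCHIVE}" ] || curl -fL -o "${JDK_ARCHIVE}" "${JDK_URL}"
[ -d "${JDK_DIR}" ] || tar -xzf "${JDK_ARCHIVE}" -C "${JDKS_DIR}"
export JAVA_HOME="${JDK_DIR}"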
@ankurcha
ankurcha / QosFilter.java
Last active August 29, 2015 14:07
Account level QoSFilter for concurrent connections
package analytics.api;
import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
import org.eclipse.jetty.continuation.Continuation;
import org.eclipse.jetty.continuation.ContinuationSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;

Keybase proof

I hereby claim:

  • I am ankurcha on github.
  • I am ankurcha (https://keybase.io/ankurcha) on keybase.
  • I have a public key whose fingerprint is 3444 C079 714A 6AF2 6529 3930 1C33 7DEB 6D46 1C4A

To claim this, I am signing this object:

achauhan at pinecone in ~/Downloads/spark-1.3.1-bin-hadoop2.6
$ bin/spark-submit --master local[8] --class com.brightcove.analytics.tacoma.Main ~/Projects/tacoma/target/scala-2.10/tacoma-*.jar
log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
15/05/12 14:46:11 INFO CheckpointReader: Checkpoint files found: file:/tmp/checkpointDir_tacoma/checkpoint-1431467097000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467096000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467095000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467094000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467093000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467092000,file:/tmp/checkpointDir_tacoma/checkpoint-1431467091000,file:/tmp/checkpointDir_tacoma/checkpoint-143146
#!/bin/bash
# This script emits the task host:port for the DEPENDENCY_URI relative to the
# MARATHON_APP_ID. This is useful when getting the mongodb host:port for the
# current application pod, where the dependency is located at '../../database/mongo/tasks'.
# Required environment variables:
## MARATHON_HOSTS
## MARATHON_APP_ID
## DEPENDENCY_URI
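Only the header is shown above. A minimal sketch of the idea, not the original script: resolve DEPENDENCY_URI against MARATHON_APP_ID, then ask the Marathon REST API for that app's task host:port (this assumes MARATHON_HOSTS is a comma-separated list of host:port entries and DEPENDENCY_URI names the dependency app, e.g. '../../database/mongo'):

# hypothetical sketch: resolve the dependency app id relative to the current app id
dep_app_id=$(python -c 'import os, posixpath; print(posixpath.normpath(posixpath.join(os.environ["MARATHON_APP_ID"], os.environ["DEPENDENCY_URI"])))')
# query Marathon's /v2/apps/<app-id>/tasks endpoint on the first configured host and print host:port of the first task
curl -s "http://${MARATHON_HOSTS%%,*}/v2/apps${dep_app_id}/tasks" |
    python -c 'import json, sys; t = json.load(sys.stdin)["tasks"][0]; print("%s:%s" % (t["host"], t["ports"][0]))'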
ubuntu@i-644498b2-mesos-slave-us-east-1e:~/spark-1.5.0-SNAPSHOT-bin-2.2.0$ bin/spark-shell --verbose
Using properties file: /home/ubuntu/spark-1.5.0-SNAPSHOT-bin-2.2.0/conf/spark-defaults.conf
Adding default property: spark.serializer=org.apache.spark.serializer.KryoSerializer
Adding default property: spark.driver.memory=5g
Adding default property: spark.mesos.constraints=zone:us-east-1a
Adding default property: spark.master=mesos://zk://10.96.239.120:2181,10.96.248.254:2181,10.96.218.65:2181/mesos_qa
Adding default property: spark.executor.uri=http://com.brightcove.rna.repo.dev.s3.amazonaws.com/spark-1.5.0-SNAPSHOT-bin-2.2.0.tgz
Parsed arguments:
  master                  mesos://zk://10.96.239.120:2181,10.96.248.254:2181,10.96.218.65:2181/mesos_qa
  deployMode              null