# install chronos
export MESOS_NATIVE_LIBRARY=/usr/local/lib/libmesos.dylib
git clone https://github.com/mesos/chronos.git
cd chronos
#must have node installed
mvn package
#start on port 8081
java -cp target/chronos*.jar org.apache.mesos.chronos.scheduler.Main --master 127.0.0.1:5050 --zk_hosts localhost:2181 --http_port 8081
#list jobs
curl -L -X GET localhost:8081/scheduler/jobs
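For reference, jobs can also be created and removed through the same REST API; a minimal sketch, where the job name, command, ISO8601 schedule, and owner below are placeholders:
#add a scheduled job
curl -L -H "Content-Type: application/json" -X POST -d '{"name": "sample-job", "command": "echo hello", "schedule": "R/2016-06-01T00:00:00Z/PT24H", "epsilon": "PT30M", "owner": "me@example.com"}' localhost:8081/scheduler/iso8601
#delete a job
curl -L -X DELETE localhost:8081/scheduler/job/sample-job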
# Marathon
# start local zk
/var/root/zookeeper-3.4.8/bin/zkServer.sh start
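To confirm zookeeper is up before starting marathon:
/var/root/zookeeper-3.4.8/bin/zkServer.sh status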
#start marathon
#marathon connects to the existing mesos instance rather than spinning up its own embedded one:
#--master 127.0.0.1:5050 and --zk zk://localhost:2181/mesos
export MESOS_NATIVE_JAVA_LIBRARY=/Users/nsabharwal/mesos-0.28.1/build/src/.libs/libmesos.dylib
/var/root/marathon-1.1.1/bin/start --master 127.0.0.1:5050 --zk zk://localhost:2181/mesos
##########################################################
#alternative: run marathon against a local in-process mesos master
#spin up a zk instance first, then start marathon
MESOS_NATIVE_JAVA_LIBRARY=/Users/nsabharwal/mesos-0.28.1/build/src/.libs/libmesos.dylib ./bin/start --master local --zk zk://localhost:2181/marathon
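Once marathon is up (default port 8080), apps can be launched through its REST API; a minimal sketch, where the app id and command are placeholders:
#deploy a test app
curl -X POST -H "Content-Type: application/json" localhost:8080/v2/apps -d '{"id": "/sleep-test", "cmd": "sleep 600", "cpus": 0.1, "mem": 32, "instances": 1}'
#list deployed apps
curl localhost:8080/v2/apps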
# point the docker client at the boot2docker VM
eval "$(boot2docker shellinit)"
docker run -d -p 8080:80 --name web nginx
docker exec -it web bash
echo myself for president > /usr/share/nginx/html/index.html
docker inspect web | grep IP
# print only the IP address (replace 'container' with the container name, e.g. web)
docker inspect -f '{{json .NetworkSettings.IPAddress}}' container
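To confirm the 8080:80 port mapping works end to end, fetch the page through the VM address reported by boot2docker ip:
curl http://$(boot2docker ip):8080
#should return: myself for president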
# Create the VM (memory and disk size in MB)
boot2docker init -m 8192 -s 30000
# Start the VM
boot2docker up
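A quick check that the VM actually came up:
boot2docker status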
# Set the variables
eval "$(boot2docker shellinit)"
# Find the IP
boot2docker ip
# ssh into the boot2docker VM
boot2docker ssh
# kafka (HDP) quick test
# list topics
/usr/hdp/current/kafka-broker/bin/kafka-topics.sh --list --zookeeper localhost:2181
# create a topic
/usr/hdp/current/kafka-broker/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic test --replication-factor 1 --partitions 1
# produce messages to the topic
/usr/hdp/current/kafka-broker/bin/kafka-console-producer.sh --broker-list phdns01.cloud.hortonworks.com:6667 --topic test
# consume the topic from the beginning
/usr/hdp/current/kafka-broker/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
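To verify partition and replica assignment after creating the topic:
/usr/hdp/current/kafka-broker/bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test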
# Ambari Service Start/Stop script (control.sh)
USER='admin'
PASS='admin'
CLUSTER='dev'
HOST=$(hostname -f):8080
function start(){
  curl -u $USER:$PASS -i -H 'X-Requested-By: ambari' -X PUT -d \
  '{"RequestInfo": {"context": "Start '"$1"' via REST"}, "Body": {"ServiceInfo": {"state": "STARTED"}}}' \
  http://$HOST/api/v1/clusters/$CLUSTER/services/$1
}
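Only start() is shown in this excerpt; a matching stop() would follow the same pattern, assuming the usual Ambari REST convention of setting the service state to INSTALLED:
function stop(){
  curl -u $USER:$PASS -i -H 'X-Requested-By: ambari' -X PUT -d \
  '{"RequestInfo": {"context": "Stop '"$1"' via REST"}, "Body": {"ServiceInfo": {"state": "INSTALLED"}}}' \
  http://$HOST/api/v1/clusters/$CLUSTER/services/$1
}
# usage: start HDFS / stop HDFS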
# HDFS test
Make sure that the Google Cloud Storage connector is on the Hadoop CLASSPATH as described in the blog
[hdfs@hdpgcp-1-1435537523061 ~]$ hdfs dfs -ls gs://hivetest/
15/06/28 21:15:32 INFO gcs.GoogleHadoopFileSystemBase: GHFS version: 1.4.0-hadoop2
15/06/28 21:15:33 WARN gcs.GoogleHadoopFileSystemBase: No working directory configured, using default: 'gs://hivetest/'
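Assuming the bucket is writable, a quick round trip against it (the object name below is just an example):
hdfs dfs -put /etc/hosts gs://hivetest/hosts_test
hdfs dfs -cat gs://hivetest/hosts_test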
# dump "show create table" statements for every Hive table from the metastore, then replay them
mysql -u hive -p -N -e "select concat('show create table ', TBL_NAME, ';') from TBLS" hive > /tmp/file.sql
hive -f /tmp/file.sql
# prompt for connection details and run a HiveQL file through beeline
read -p "enter HS2 hostname: " HS2
read -p "enter username: " username
read -s -p "enter password: " passwd
echo
read -p "enter filename: " filename
beeline -u jdbc:hive2://$HS2:10000/default -n $username -p $passwd -f $filename
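For a quick connectivity check, the same connection also accepts an inline query instead of a file:
beeline -u jdbc:hive2://$HS2:10000/default -n $username -p $passwd -e "show databases;"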