Skip to content

Instantly share code, notes, and snippets.

View iandow's full-sized avatar

Ian Downard iandow

View GitHub Profile
# Benchmark parameters: number of messages to produce and payload size in bytes.
MESSAGES=1000000
MSGSIZE=100

# Remove any stream left over from a previous run; suppress the "no such
# stream" error on a clean cluster. Uses the portable "> /dev/null 2>&1"
# instead of the bash-only ">&" shorthand.
maprcli stream delete -path /user/mapr/iantest > /dev/null 2>&1
maprcli stream create -path /user/mapr/iantest -produceperm p -consumeperm p -topicperm p

# Run the producer once and append the colon-delimited partition labels from
# the tail of its report as a comma-separated row, then write the CSV header
# for the measurement rows that follow.
mapr perfproducer -path /user/mapr/iantest | tail -n 12 | grep ":" | awk -F ":" '{printf "%s, ",$1}' >> partitions.csv
echo "numPartitions, numMsgs, sizeMsg, eth0 TX kB/s, eth0 RX kB/s" >> partitions.csv

# Tear the probe stream down so the benchmark loop can recreate it with a
# specific partition count.
maprcli stream delete -path /user/mapr/iantest > /dev/null 2>&1
for NUMPARTITIONS in 1 2 3 4 5 6 7 8 9 10 15 20 25 30 35 40 45 50 60 70 80 90 100 150 200 250 300 350 400; do
maprcli stream create -path /user/mapr/iantest -produceperm p -consumeperm p -topicperm p -defaultpartitions $NUMPARTITIONS
while true; do
tail -n 40 /opt/kafka_2.11-0.10.0.1/logs/server.log | grep ZkTimeout;
if [ $? -eq 0 ]; then
sudo service kafka stop
ssh kafkanodeb sudo service kafka stop
ssh kafkanodec sudo service kafka stop
sudo rm -rf /tmp/kafka-logs
ssh kafkanodeb sudo rm -rf /tmp/kafka-logs
ssh kafkanodec sudo rm -rf /tmp/kafka-logs
sudo rm -rf /tmp/zookeeper/version-2
environment:
mapr_core_version: 5.2.0
config:
admin_id: mapr
cluster_admin_create: false
cluster_admin_gid: 5000
cluster_admin_group: mapr
cluster_admin_id: mapr
cluster_admin_uid: 5000
cluster_id: '6486878629867426246'
import os, sys

# NOTE: TF_CPP_MIN_LOG_LEVEL must be set BEFORE tensorflow is imported —
# the C++ runtime reads it at import time, so setting it afterwards (as the
# original code did) has no effect. '2' suppresses INFO and WARNING messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

# change this as you see fit
# Path of the image to process, taken from the first command-line argument
# (the script fails with IndexError if no argument is supplied).
image_path = sys.argv[1]

# Read in the image_data
############################################################
# Daemon
############################################################
# Start in daemon (background) mode and release terminal (default: off)
daemon on
# File to store the process ID, also called pid file. (default: not defined)
process_id_file /var/run/motion/motion.pid
@iandow
iandow / gist:97502c6959bdaa1eb0e5ed030f9e82a0
Last active July 24, 2017 23:03
maprdb_pyspark_error.txt
mapr@nodea107:~/python-bindings$ PYSPARK_PYTHON=/usr/bin/python3 /opt/mapr/spark/spark-2.1.0/bin/pyspark --py-files dist/maprdb-0.0.1-py3.4.egg
Python 3.4.3 (default, Nov 17 2016, 01:08:31)
[GCC 4.8.4] on linux
Type "help", "copyright", "credits" or "license" for more information.
17/07/24 22:45:40 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
17/07/24 22:45:40 WARN Utils: Service 'SparkUI' could not bind on port 4041. Attempting port 4042.
17/07/24 22:45:40 WARN Utils: Service 'SparkUI' could not bind on port 4042. Attempting port 4043.
Welcome to
____ __
/ __/__ ___ _____/ /__
nvidia@tegra-ubuntu:~/nd4j$ mvn clean install
[INFO] Scanning for projects...
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO]
[INFO] nd4j
[INFO] nd4j-shade
[INFO] jackson
[INFO] nd4j-common
[INFO] nd4j-context
{"paragraphs":[{"text":"%md # Forest Fire Prediction through KMeans Clustering\n<img src=\"https://surveymonkey-assets.s3.amazonaws.com/survey/121135814/6a48257c-8996-4aa6-ba56-6b1e373385c3.png\" width=100 hspace=\"20\" style=\"float: right;\">\nThe United States Forest Service provides datasets that describe forest fires that have occurred in Canada and the United States since year 2000. We can predict where forest fires are prone to occur by partitioning the locations of past burns into clusters whose centroids can be used to optimally place heavy fire fighting equipment as near as possible to where fires are likely to occur.\n\nDataset:\nhttps://fsapps.nwcg.gov/gisdata.php\n","user":"anonymous","dateUpdated":"2017-10-24T17:06:43+0000","config":{"colWidth":12,"enabled":true,"results":{},"editorSetting":{"language":"markdown","editOnDblClick":true},"editorMode":"ace/mode/markdown","editorHide":true,"tableHide":false},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data
Pipeline Status: RUNNING_ERROR: com.streamsets.pipeline.api.base.OnRecordErrorException: HTTP_01 - Error fetching resource. Status: 400 Reason: Bad Request {"error":{"code":400,"message":"One or more data points had errors","details":"Please see the TSD logs or append \"details\" to the put request","trace":"net.opentsdb.tsd.BadRequestException: One or more data points had errors\n\tat net.opentsdb.tsd.PutDataPointRpc$1GroupCB.call(PutDataPointRpc.java:601) [tsdb-2.4.0RC1.jar:a64bcee]\n\tat net.opentsdb.tsd.PutDataPointRpc.processDataPoint(PutDataPointRpc.java:664) [tsdb-2.4.0RC1.jar:a64bcee]\n\tat net.opentsdb.tsd.PutDataPointRpc.execute(PutDataPointRpc.java:278) [tsdb-2.4.0RC1.jar:a64bcee]\n\tat net.opentsdb.tsd.RpcHandler.handleHttpQuery(RpcHandler.java:283) [tsdb-2.4.0RC1.jar:a64bcee]\n\tat net.opentsdb.tsd.RpcHandler.messageReceived(RpcHandler.java:134) [tsdb-2.4.0RC1.jar:a64bcee]\n\tat org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) [netty-3.9.4.
FROM docker.artifactory/pacc_nvidia:cuda-9.0-base
RUN apt-get update -y && \
apt-get install -y build-essential libopenblas-dev liblapack-dev \
libopencv-dev cuda-command-line-tools-9-0 \
cuda-cublas-dev-9-0 \
cuda-cudart-dev-9-0 \
cuda-cufft-dev-9-0 \
cuda-curand-dev-9-0 \
cuda-cusolver-dev-9-0 \