
Jiacong Fang (zldrobit)

jupyterlab.sh
# run JupyterLab detached (-d) so the commands below can run while it serves,
# mounting the current directory and matching the container user to the host UID/GID
sudo docker run --rm -d -p 8888:8888 --name jupyterlab -e JUPYTER_ENABLE_LAB=yes -v "$PWD":/home/jovyan/work -e NB_UID=$(id -u) -e NB_GID=$(id -g) -e GRANT_SUDO=yes --user root zldrobit/jupyter:py27
# alternative: run everything as root inside the container
# sudo docker run --rm -d -p 8888:8888 --name jupyterlab -e JUPYTER_ENABLE_LAB=yes -v "$PWD":/home/jovyan/work -e NB_UID=0 -e NB_GID=0 -e GRANT_SUDO=yes --user root zldrobit/jupyter:py27
# give the server a moment to start, then print its URL and login token
sleep 3
sudo docker exec jupyterlab jupyter notebook list
# echo 'jupyter is running.'
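Once the server is up, the last command prints its URL including the login token. As a quick sanity check from the host, a minimal sketch that polls Jupyter's REST API (the token value is a placeholder to copy from that output):

# minimal sketch: poll the running Jupyter server's REST API from the host
# (the token is a placeholder; copy it from the 'jupyter notebook list' output)
import requests

resp = requests.get('http://localhost:8888/api', params={'token': '<your-token>'})
print(resp.json())  # e.g. {"version": ...}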
Dockerfile.pytorch:1.0-py2.7-cuda9.0-cudnn7
FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04

ARG PYTHON_VERSION=2.7

# build tools and image libraries needed to compile PyTorch and torchvision
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        curl \
        vim \
        ca-certificates \
        libjpeg-dev \
        libpng-dev && \
    rm -rf /var/lib/apt/lists/*
install-deeplearning-env.txt
# install nvidia driver
sudo apt-get update
sudo apt-get install nvidia-384
sudo reboot
# set gpu cards to persistent mode
sudo nvidia-persistenced --persistence-mode
# follow docker installation instructions from
# https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-ce-1
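After the reboot, it is worth confirming the driver is loaded before setting up Docker; a minimal sketch that just shells out to nvidia-smi:

# minimal sketch: confirm the NVIDIA driver is loaded after the reboot
import subprocess

print(subprocess.check_output(['nvidia-smi']))  # driver version and visible GPUs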
tf-serving-post-request.py
import base64
import json
import requests

file = 'image.jpeg'
# /v1/models/detection is the model path; :predict is the method name
url = 'http://localhost:8501/v1/models/detection:predict'
content = open(file, 'rb').read()
# encode the binary data to base64 before JSON-encoding it
content = base64.b64encode(content).decode('utf-8')
# TF Serving's REST API wraps base64 strings in a {"b64": ...} object
data = json.dumps({'instances': [{'b64': content}]})
response = requests.post(url, data=data)
print(response.json())
reflect.py (created Dec 29, 2018; forked from huyng/reflect.py)
A simple echo server to inspect HTTP web requests
#!/usr/bin/env python
# Reflects the requests from HTTP methods GET, POST, PUT, and DELETE
# Written by Nathan Hamiel (2010)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser

class RequestHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # print the request line and headers so they can be inspected
        print("\n----- Request Start ----->\n")
        print(self.path)
        print(self.headers)
        print("<----- Request End -----\n")
        self.send_response(200)
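To exercise the echo server, point any HTTP client at it and watch the request appear on the server's console; huyng's original listens on port 8080 by default (an assumption here, since the preview cuts off before the main block):

# send a request and watch the server echo it on its console
import requests

requests.get('http://localhost:8080/some/path', headers={'X-Debug': 'hello'})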
vimrc.txt
set nocompatible              " be iMproved, required
filetype off                  " required

" set the runtime path to include Vundle and initialize
set rtp+=~/.vim/bundle/Vundle.vim
call vundle#begin()
" alternatively, pass a path where Vundle should install plugins
"call vundle#begin('~/some/path/here')

" let Vundle manage Vundle, required
Plugin 'VundleVim/Vundle.vim'

" all plugins must be added before the following line
call vundle#end()             " required
filetype plugin indent on     " required
torch7-autograd-optim.lua
require 'nn';
require 'nngraph';
require 'optim';
autograd = require 'autograd';

-- a single linear layer trained with MSE
-- W0 = torch.ones(3, 1)
params = {W = torch.rand(3, 1)}
optimState = {learningRate = 0.001}
criterion = autograd.nn.MSECriterion()

-- loss function that autograd differentiates with respect to params
function neuralNet(params, x, y)
   local prediction = x * params.W
   return criterion(prediction, y)
end
tf-serving-request-bitstream.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 15:12:17 2018

@author: robitfang
"""
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
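The preview stops at the imports. A typical continuation for this kind of client is sketched below; the host, port, model name, signature, and input key are assumptions, not taken from the gist:

import tensorflow as tf

# open an insecure channel to TF Serving's gRPC port (8500 by default)
channel = implementations.insecure_channel('localhost', 8500)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

# build a PredictRequest carrying the raw JPEG bitstream as a length-1 string tensor
request = predict_pb2.PredictRequest()
request.model_spec.name = 'detection'                  # assumed model name
request.model_spec.signature_name = 'serving_default'  # assumed signature
with open('image.jpeg', 'rb') as f:
    request.inputs['image'].CopyFrom(                  # 'image' key is assumed
        tf.contrib.util.make_tensor_proto(f.read(), shape=[1]))

result = stub.Predict(request, 10.0)  # 10-second timeout
print(result)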
tensorflow_model_analysis.py
import tensorflow as tf
import argparse

def profile(graph, cmd):
    # dump the graph so it can also be inspected in TensorBoard
    run_meta = tf.RunMetadata()
    writer = tf.summary.FileWriter("./graph", graph)
    writer.close()
    # count the floating-point operations in the graph
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    return tf.profiler.profile(graph, run_meta=run_meta, cmd=cmd, options=opts)
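A hypothetical way to call it on a frozen graph (the .pb filename is a placeholder; 'scope' is one of the profiler's standard view commands):

# load a frozen GraphDef and count its float operations
with tf.gfile.GFile('the_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    flops = profile(graph, 'scope')
    print('total float ops:', flops.total_float_ops)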
tensorflow_gen_saved_model.py
import tensorflow as tf
import argparse
import os
parser = argparse.ArgumentParser(description='Generate a saved model.')
parser.add_argument('--export_model_dir', type=str, default='./saved_model/the_model', help='export model directory')
parser.add_argument('--model_version', type=int, default=1, help='model version')
parser.add_argument('--model', type=str, default='the_model.pb', help='model pb file')
parser.add_argument("--input_tensor", default="input:0", help="input tensor", type=str)
parser.add_argument("--output_tensor", default="output:0", help="output tensor", type=str)