Skip to content

Instantly share code, notes, and snippets.

use "-undefined dynamic_lookup" instead of "-lpython"
use "-Xpreprocessor -fopenmp -lomp" instead of "-fopenmp"
flags possibly needed alongside "-lpython": "-L/usr/local/opt/libomp/lib -I/usr/include/python2.7" (to be confirmed)
get numpy include dir: np.get_include()
# Profile a TF1 graph: report total FLOPs and trainable-parameter count.
# NOTE(review): the pasted snippet lost its indentation; block structure
# restored below. `config` is defined outside this fragment — confirm.
run_meta = tf.RunMetadata()
with tf.Session(config=config) as sess:
    # FLOP count across all ops in the graph
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(sess.graph, run_meta=run_meta, cmd='op', options=opts)
    # trainable-parameter count
    opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    params = tf.profiler.profile(sess.graph, run_meta=run_meta, cmd='op', options=opts)
    print("ops {:,} --- params {:,}".format(flops.total_float_ops, params.total_parameters))
# 3-D figure setup, adapted from
# https://stackoverflow.com/questions/30755249/hierarchy-in-matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # import registers the '3d' projection

# keyword arguments forwarded to each subplot's add_subplot() call
subplot_args = dict(projection='3d')
fig, ax = plt.subplots(subplot_kw=subplot_args)
# Command-line options for exporting a frozen .pb graph as a TF SavedModel.
import tensorflow as tf
import argparse
import os

parser = argparse.ArgumentParser(description='Generate a saved model.')
# where the SavedModel is written and under which version subdirectory
parser.add_argument('--export_model_dir', type=str,
                    default='./saved_model/the_model',
                    help='export model directory')
parser.add_argument('--model_version', type=int, default=1,
                    help='model version')
# input frozen graph and the tensor endpoints to expose
parser.add_argument('--model', type=str, default='the_model.pb',
                    help='model pb file')
parser.add_argument('--input_tensor', type=str, default='input:0',
                    help='input tensor')
parser.add_argument('--output_tensor', type=str, default='output:0',
                    help='output tensor')
import tensorflow as tf
import argparse
def profile(graph, cmd):
    """Dump *graph* for TensorBoard and prepare FLOP-profiling options.

    NOTE(review): the pasted snippet lost its indentation (restored here)
    and appears truncated — `cmd` is never used and nothing is returned in
    the visible lines; confirm against the original gist.
    """
    run_meta = tf.RunMetadata()
    # write the graph definition to ./graph so it can be inspected in TensorBoard
    writer = tf.summary.FileWriter("./graph", graph)
    writer.close()
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 15:12:17 2018
@author: robitfang
"""
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
-- Torch7 setup for a small autograd regression example.
require 'nn';
require 'nngraph';
require 'optim';
autograd = require 'autograd';
-- W0 = torch.ones(3, 1)
-- model parameters: a 3x1 weight matrix, randomly initialized
params = {W=torch.rand(3,1)}
-- optimizer hyper-parameters (consumed by optim.*)
optimState = {learningRate=0.001}
-- mean-squared-error loss wrapped for use with autograd
criterion = autograd.nn.MSECriterion()
function neuralNet(params, x, y)
" Standard Vundle plugin-manager bootstrap (from the Vundle README template).
set nocompatible " be iMproved, required
filetype off " required
" set the runtime path to include Vundle and initialize
set rtp+=~/.vim/bundle/Vundle.vim
call vundle#begin()
" alternatively, pass a path where Vundle should install plugins
"call vundle#begin('~/some/path/here')
" let Vundle manage Vundle, required
@zldrobit
zldrobit / reflect.py
Created December 29, 2018 08:39 — forked from huyng/reflect.py
A simple echo server to inspect http web requests
#!/usr/bin/env python
# Reflects the requests from HTTP methods GET, POST, PUT, and DELETE
# Written by Nathan Hamiel (2010)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Send an image to a TF Serving REST endpoint as a base64-encoded payload.
import base64
import requests

file = 'image.jpeg'  # NOTE(review): shadows the Py2 builtin `file`; name kept for compatibility
# predict is the method name
# /models/detection is the model path
url = 'http://localhost:8501/v1/models/detection:predict'
# read via a context manager so the handle is closed deterministically
# (original used open(...).read(), leaking the file handle)
with open(file, 'rb') as fh:
    content = fh.read()
# encode binary data to b64 before json encode
content = base64.b64encode(content)