from flask import Flask
from flask import request
import simplejson as json
from decimal import Decimal
import numpy as np
import os
import sys
# Make sure that you set this to the location where your caffe2 library lives.
caffe2_root = '<enter the path to caffe2 here>'
sys.path.insert(0, os.path.join(caffe2_root, 'gen'))
# After setting the caffe2 root path, we will import all the caffe2 libraries needed.
from caffe2.proto import caffe2_pb2
from pycaffe2 import core, net_drawer, workspace
# net is the network definition.
net = caffe2_pb2.NetDef()
net.ParseFromString(open('inception_net.pb', 'rb').read())
# tensors contain the parameter tensors.
tensors = caffe2_pb2.TensorProtos()
tensors.ParseFromString(open('inception_tensors.pb', 'rb').read())
DEVICE_OPTION = caffe2_pb2.DeviceOption()
# Uncomment the line below (and comment out the two CUDA lines) to run on the CPU.
#DEVICE_OPTION.device_type = caffe2_pb2.CPU
# The two lines below run the network on a GPU. If you have multiple GPUs,
# you might also want to specify a different gpu id.
DEVICE_OPTION.device_type = caffe2_pb2.CUDA
DEVICE_OPTION.cuda_gpu_id = 0
# Caffe2 has a concept of "workspace", which is similar to that of Matlab. Each workspace
# is a self-contained set of tensors and networks. In this case, we will just use the default
# workspace so we won't dive too deep into it.
workspace.SwitchWorkspace('default')
# First, we feed all the parameters to the workspace.
for param in tensors.protos:
    workspace.FeedBlob(param.name, param, DEVICE_OPTION)
# The network expects an input blob called "input", which we create here.
# The content of the input blob is going to be fed when we actually do
# classification.
workspace.CreateBlob("input")
# Specify the device option of the network, and then create it.
net.device_option.CopyFrom(DEVICE_OPTION)
workspace.CreateNet(net)
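# Note: workspace.RunNet below refers to the network by name ("inception"),
# so the NetDef loaded from inception_net.pb is expected to carry that name.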
# We will also load the synsets file where we can look up the actual words for each of our predictions.
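# Each line is expected to hold one human-readable label, in the same order as
# the network's output classes, so that synsets[idx] maps an output index to its label.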
synsets = [l.strip() for l in open('synsets.txt').readlines()]
def ClassifyFromUrl(url, output_name="softmax2"):
    from skimage import io, transform
    img = io.imread(url)
    # Crop the center square of the image.
    shorter_edge = min(img.shape[:2])
    crop_height = (img.shape[0] - shorter_edge) // 2
    crop_width = (img.shape[1] - shorter_edge) // 2
    cropped_img = img[crop_height:crop_height + shorter_edge, crop_width:crop_width + shorter_edge]
    # Resize the image to 224 x 224.
    resized_img = transform.resize(cropped_img, (224, 224))
    # Normalize the image and feed it into the network. The network expects
    # a four-dimensional tensor, since it can process images in batches. In our
    # case, we treat the image as a batch of size one.
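    # Note (assumption): skimage's transform.resize returns floats in [0, 1],
    # so multiplying by 256 rescales back to roughly the 0-255 range and
    # subtracting 117 removes an approximate mean pixel value.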
    normalized_img = resized_img.reshape((1, 224, 224, 3)).astype(np.float32) * 256 - 117
    workspace.FeedBlob("input", normalized_img, DEVICE_OPTION)
    workspace.RunNet("inception")
    return workspace.FetchBlob(output_name)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
return "<html><body><form action='recognize' method='GET'><input type='text' size='100' name='url'><button type='submit'>Submit</button></form></body></html>"
@app.route('/recognize', methods=['GET'])
def recognize():
    url = request.args.get('url', '')
    predictions = ClassifyFromUrl(url).flatten()
    indices = np.argsort(predictions)
    output = []
    # Walk the sorted indices backwards to collect the top 4 predictions,
    # highest score first.
    maxIdx = -1 * min(len(predictions), 5)
    for idx in indices[:maxIdx:-1]:
        output.append({'prediction': synsets[idx], 'score': Decimal(str(predictions[idx]))})
    # return jsonify(predictions=output)  # alternative, needs "from flask import jsonify"
    return json.dumps({'predictions': output}, use_decimal=True)
if __name__ == '__main__':
    app.debug = True
    app.run()
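
# Example of querying the running server (assumes Flask's default port 5000;
# the image URL below is a placeholder):
#
#   curl "http://localhost:5000/recognize?url=http://example.com/cat.jpg"
#
# The response is JSON of the form
#   {"predictions": [{"prediction": "<label>", "score": 0.87}, ...]}
# listing the top four classes, highest score first.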