'''
Created on Apr 24, 2016
@author: tisham
'''
import time
import requests
import cv2
# Grove RGB LCD driver from the GrovePi library; provides the setText()/setRGB()
# calls left commented out in showResultOnLCD() below
# from grove_rgb_lcd import *

# Base analyze endpoint; the visual features to extract are passed via the
# 'visualFeatures' query parameter (see params in the __main__ block)
_url = 'https://api.projectoxford.ai/vision/v1.0/analyze'
_key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'  # Paste your primary subscription key here
_maxNumRetries = 10

# Libraries used only by the commented-out matplotlib display at the bottom
import numpy as np
import matplotlib.pyplot as plt
from pylab import show
def processRequest(json, data, headers, params=None):
    """
    Helper function to process the request to Project Oxford.

    Parameters:
    json: Used when processing an image from its URL. See API documentation.
    data: Used when processing an image read from disk. See API documentation.
    headers: Used to pass the key information and the data type of the request.
    params: Query-string parameters, e.g. the visual features to analyse.
    """
    retries = 0
    result = None

    while True:
        response = requests.request('post', _url, json=json, data=data,
                                    headers=headers, params=params)

        if response.status_code == 429:
            # Rate limited: back off for a second and retry, up to _maxNumRetries times
            print("Message: %s" % (response.json()['error']['message']))
            if retries <= _maxNumRetries:
                time.sleep(1)
                retries += 1
                continue
            else:
                print('Error: failed after retrying!')
                break
        elif response.status_code == 200 or response.status_code == 201:
            # Success: decode JSON or raw image bytes depending on the content type
            if 'content-length' in response.headers and int(response.headers['content-length']) == 0:
                result = None
            elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):
                if 'application/json' in response.headers['content-type'].lower():
                    result = response.json() if response.content else None
                elif 'image' in response.headers['content-type'].lower():
                    result = response.content
        else:
            print("Error code: %d" % (response.status_code))
            print("Message: %s" % (response.json()['error']['message']))
        break

    return result
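
# Illustrative alternative (assumed from the API documentation rather than
# exercised in this gist): analysing a remote image by URL instead of raw
# bytes. The JSON body carries the URL, 'data' stays None, and the content
# type switches to JSON:
#
#   headers = {'Ocp-Apim-Subscription-Key': _key,
#              'Content-Type': 'application/json'}
#   params = {'visualFeatures': 'Description,Tags'}
#   result = processRequest({'url': urlImage}, None, headers, params)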
def renderResultOnImage(result, img):
    """Display the obtained results on the input image"""
    # 'accentColor' is a six-digit hex string, e.g. 'A36D28'; split it into R, G, B
    R = int(result['color']['accentColor'][:2], 16)
    G = int(result['color']['accentColor'][2:4], 16)
    B = int(result['color']['accentColor'][4:], 16)
    cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), color=(R, G, B), thickness=25)

    if 'categories' in result:
        # Label the image with the highest-scoring category returned by the API
        categoryName = sorted(result['categories'], key=lambda x: x['score'], reverse=True)[0]['name']
        cv2.putText(img, categoryName, (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 3)
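
# Illustrative usage (not exercised in this gist, which runs headless and uses
# showResultOnLCD() instead): renderResultOnImage() can annotate the captured
# frame when the 'Color' and 'Categories' features were requested, e.g.:
#
#   img = cv2.imread('temp_cap.jpg')
#   renderResultOnImage(result, img)
#   cv2.imwrite('temp_annotated.jpg', img)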
def showResultOnLCD(result):
    """Display the recognition result on a Grove RGB LCD"""
    if 'description' in result:
        caption = result['description']['captions'][0]['text']
        print(caption)
        # setText(caption)
        # setRGB(0, 0, 64)
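
# For reference, the 'description' field read above has roughly this shape in
# the v1.0 analyze response (illustrative values only):
#
#   'description': {'tags': ['outdoor', 'building'],
#                   'captions': [{'text': 'a building next to a road',
#                                 'confidence': 0.87}]}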
if __name__ == '__main__':
    # URL direction to image
    # urlImage = 'https://oxfordportal.blob.core.windows.net/vision/Analysis/3.jpg'

    # Capture an image using OpenCV
    cap = cv2.VideoCapture(0)
    # Set the frame width (property 3) and height (property 4); property 15 is
    # the exposure time, which this camera does not actually honour
    cap.set(3, 1280)
    cap.set(4, 1024)
    cap.set(15, 0.1)
    ret, img = cap.read()
    cv2.imwrite('temp_cap.jpg', img)
    cap.release()

    pathToFileInDisk = r'temp_cap.jpg'
    with open(pathToFileInDisk, 'rb') as f:
        data = f.read()

    # Computer Vision parameters: request every feature the helpers above use
    params = {'visualFeatures': 'Description,Tags,Color,Categories'}

    headers = dict()
    headers['Ocp-Apim-Subscription-Key'] = _key
    headers['Content-Type'] = 'application/octet-stream'

    # json = {'url': urlImage}
    json = None
    # data = None

    result = processRequest(json, data, headers, params)
    print(result)

    # Load the original image, fetched from the URL
    # arr = np.asarray(bytearray(requests.get(urlImage).content), dtype=np.uint8)
    # img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)

    if result is not None:
        showResultOnLCD(result)

    # fig, ax = plt.subplots(figsize=(15, 20))
    # ax.imshow(img)
    # show()