Skip to content

Instantly share code, notes, and snippets.

@laisan86
Created April 29, 2017 12:48
Show Gist options
  • Save laisan86/49c41ab6c7d043700745de9475e28e1b to your computer and use it in GitHub Desktop.
Video recognition and Web API learning
import httplib
import json
import os
import socket
import sys
import time
import urllib
import urllib2

import cv2
import serial
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
# ThingSpeak write API key.  Read from the environment when available so
# the secret does not have to live in source control; the literal default
# keeps existing deployments working unchanged.
# NOTE(review): this key is committed to a public gist -- it should be
# revoked and rotated.
thingSpeakApiKey = os.environ.get("THINGSPEAK_API_KEY", "O93NMWVP2BW5NQME")

# Load the pre-trained Haar cascade for frontal-face detection.
cascPath = '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml'
faceCascade = cv2.CascadeClassifier(cascPath)

# Capture from the first video device (/dev/video0).
video_capture = cv2.VideoCapture(0)

# Serial link to the Arduino Uno: 9600 baud, half-second read timeout.
ser = serial.Serial("/dev/ttyUSB0", baudrate=9600, timeout=0.5)
def post_to_thingspeak(payload):
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
not_connected = 1
while (not_connected):
try:
conn = httplib.HTTPConnection("api.thingspeak.com:80")
conn.connect()
not_connected = 0
except (httplib.HTTPException, socket.error) as ex:
print "Error: %s" % ex
# time.sleep(10)
conn.request("POST", "/update", payload, headers)
response = conn.getresponse()
print( response.status, response.reason, payload, time.strftime("%c"))
data = response.read()
conn.close()
def learning():
    """Upload /tmp/temp.jpg to the recognition web service.

    Returns the ``catch`` field of the service's JSON reply (treated as
    a boolean by the caller).
    """
    register_openers()
    # Open the snapshot in binary mode: a JPEG must not go through
    # text-mode newline translation.  Close it in ``finally`` -- the
    # original leaked the file handle on every call.
    pic = open("/tmp/temp.jpg", "rb")
    try:
        datagen, headers = multipart_encode({"pic": pic})
        upload_url = 'http://scsonic.com/nasa/upload.php'
        request = urllib2.Request(upload_url, datagen, headers)
        reply = urllib2.urlopen(request).read()
        return json.loads(reply)["catch"]
    finally:
        pic.close()
while True:
try:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
# Write a copy to memory to learn by TensorFlow
cv2.imwrite('/tmp/temp.jpg', frame)
if learning() == True:
# Query the environemtal informations and catch it!!!
ser.write("B")
env = ser.readline()
e = env.split()
params = urllib.urlencode({'field1': e[0], 'field2': e[1], 'field3': e[2], 'field4': e[3], 'field5': e[4], 'key': thingSpeakApiKey})
post_to_thingspeak(params)
else:
ser.write("R")
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except:
pass
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
# Close the serial port
ser.close()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment