Skip to content

Instantly share code, notes, and snippets.

@mikeapted
Created August 24, 2019 01:33
Show Gist options
Save mikeapted/f820d52f263b3dcf2e2ba1494f7eab17 to your computer and use it in GitHub Desktop.
Sample Python code for sentiment analysis with Rekognition from laptop webcam
import time
import cv2
import boto3
# Get the Client
# Module-level AWS session and Rekognition client, shared by main()/display().
session = boto3.Session()
# NOTE(review): region is hard-coded to us-east-1 — confirm this matches deployment.
rekog_client = session.client("rekognition", region_name='us-east-1')
# Requested webcam capture resolution in pixels (used by main and display).
width = 1280
height = 720
# Frames are downscaled by this factor before being sent to Rekognition,
# presumably to keep the JPEG payload small — confirm accuracy is acceptable.
scale_factor = 0.1
# BGR color used for all overlay text (OpenCV uses BGR, so this is red).
text_color = (0, 0, 255)
def current_milli_time():
    """Return the current Unix epoch time as a whole number of milliseconds."""
    seconds_now = time.time()
    return int(round(seconds_now * 1000))
def find_emotion(emotions):
    """Return the emotion entry with the strictly highest confidence.

    Falls back to a sentinel dict ({'Confidence': 0.0, 'Type': None}) when
    the list is empty or no entry has confidence above 0.0. On ties, the
    earliest entry wins.
    """
    best = {'Confidence': 0.0, 'Type': None}
    for candidate in emotions:
        best = candidate if candidate['Confidence'] > best['Confidence'] else best
    return best
# Display captured frame and overlay boxes/output using OpenCV
def display(frame, responses):
    """Draw a bounding box and attribute labels for each detected face.

    Args:
        frame: full-resolution BGR image (numpy array) to draw on and show.
        responses: Rekognition detect_faces response dict; 'FaceDetails'
            entries carry a relative 'BoundingBox' plus attribute fields.

    Shows the annotated frame in a named OpenCV window as a side effect.
    """
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    # BUG FIX: the original collected all boxes first while overwriting the
    # attribute strings each iteration, so every box got the LAST face's
    # labels. Pair each face's box with its own attributes instead.
    for face in responses['FaceDetails']:
        box = face['BoundingBox']
        # BoundingBox values are relative (0..1); scale to pixel coordinates.
        x1 = int(box['Left'] * width)
        y1 = int(box['Top'] * height)
        x2 = int((box['Left'] + box['Width']) * width)
        y2 = int((box['Top'] + box['Height']) * height)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # Nudge the label anchor above the box unless that would leave frame.
        y1 = y1 - 15 if y1 - 15 > 15 else y1 + 15
        labels = [
            face['Gender']['Value'],
            f"Age: {face['AgeRange']['Low']}-{face['AgeRange']['High']}",
            f"Smile: {face['Smile']['Value']}",
            f"Mustache: {face['Mustache']['Value']}",
            f"Beard: {face['Beard']['Value']}",
            # find_emotion may return Type=None (no emotions); putText needs str.
            find_emotion(face['Emotions'])['Type'] or '',
        ]
        # Stack the labels to the right of the box, 40 px apart.
        for i, label in enumerate(labels):
            cv2.putText(frame, label, (x2 + 40, y1 + 40 * (i + 1)),
                        font, 2, text_color, 1, cv2.LINE_AA)
    window_name = 'Rekognition Demo'
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    # cv2.setWindowProperty(
    #     window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow(window_name, frame)
# Start video capture and loop over frames, calling Rekognition, display results
def main():
    """Stream webcam frames through Rekognition face detection until 'q'.

    Captures frames at the configured resolution, sends a downscaled JPEG
    of each frame to Rekognition detect_faces, and overlays the results on
    the full-resolution frame. Press 'q' in the display window to quit.
    """
    vidcap = cv2.VideoCapture(0)
    vidcap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    vidcap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    try:
        # BUG FIX: the original nested a `while success` loop inside
        # `while True`, so the 'q' break only exited the inner loop and the
        # outer loop restarted capture forever; release() was unreachable.
        while True:
            success, frame = vidcap.read()
            if not success:
                # Camera unplugged or no frame available — stop cleanly.
                break
            # Downscale before upload to keep the Rekognition payload small.
            scaled_frame = cv2.resize(
                frame, (int(width * scale_factor), int(height * scale_factor)))
            ok, buffer = cv2.imencode('.jpg', scaled_frame)
            if not ok:
                # Encoding failed for this frame; skip it rather than send junk.
                continue
            response = rekog_client.detect_faces(Image={'Bytes': bytearray(buffer)},
                                                 Attributes=['ALL'])
            display(frame, response)
            # Pressing Q for exit
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera and windows even if an API call raised.
        vidcap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment