Skip to content

Instantly share code, notes, and snippets.

@shijaz
Last active July 16, 2018 08:32
Show Gist options
  • Save shijaz/fb18e5328a6fe394d11ca6d7064a5f3f to your computer and use it in GitHub Desktop.
Modified webbot.py for the robot project on www.awsomenow.com
#!/usr/bin/env python3
## webbot code modified by Shijaz Abdulla - www.awsomenow.com
## See the original project on GitHub: https://github.com/picymru/webbot
## This code is referenced in http://www.awsomenow.com/2018/07/06/building-a-robot-with-computer-vision-and-speech/
##
# Some of the code that controls the robot's movement is from the Webbot project by PiCymru. Including their copyright notice.
##
# Copyright (c) 2016 PiCymru
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, logging, subprocess, time, argparse
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template, HTTPError
from gpiozero import CamJamKitRobot, DistanceSensor, LineSensor
# from picamera import PiCamera #we're calling raspistill using subprocess.Popen()
from time import sleep
import boto3
import datetime
global response
from contextlib import closing
import pygame
# I swapped 'right' with 'left' and 'back' with 'front' because of the way I had wired my motors.
# Depending on how you did yours, you may or may not have to change it back!
# === START webbot CODE by PiCymru ===
@route('/right')
def action_left():
    """Nudge the robot left for 0.2 s (bound to /right because of the wiring swap)."""
    robot.left()
    sleep(0.2)  # brief pulse, then brake so the robot moves in small steps
    robot.stop()
    return "LEFT TURN"
@route('/left')
def action_right():
    """Nudge the robot right for 0.2 s (bound to /left because of the wiring swap)."""
    robot.right()
    sleep(0.2)  # brief pulse, then brake so the robot moves in small steps
    robot.stop()
    return "RIGHT TURN"
@route('/forward')
@route('/forwards')
def action_forward():
    """Drive forward briefly.

    Calls robot.backward() on purpose: the motors are wired reversed
    (see the note above the handlers).
    """
    robot.backward()
    sleep(0.2)
    robot.stop()
    return "FORWARDS"
@route('/back')
@route('/backward')
def action_back():
    """Drive backward briefly.

    Calls robot.forward() on purpose: the motors are wired reversed
    (see the note above the handlers).
    """
    robot.forward()
    sleep(0.2)
    robot.stop()
    return "BACKWARDS"
@route('/ultrasonic')
def ultrasonic():
    """Return the ultrasonic distance reading (metres) formatted to 2 decimals.

    Bug fix: the sensor created in the __main__ block is named
    ``distance_sensor`` -- the original referenced an undefined ``sensor``
    and raised NameError on every request.  Note the sensor only exists
    when the server was started with --distance-sensor.
    """
    return "{:.2f}".format(distance_sensor.distance)
# === END webbot CODE by PiCymru ===
# min_confidence=95 ensures that the robot only talks if Rekognition was 95%+ confident of the object it saw.
# Otherwise, the robot plays the ./notfound_Salli.mp3 file to say that it cannot see anything.
# max_labels=1 ensures Amazon Rekognition only returns one object (the most confident one)
def detect_labels(bucket, key, max_labels=1, min_confidence=95, region="eu-west-1"):
    """Label the image stored at s3://bucket/key via Amazon Rekognition.

    max_labels=1 keeps only the single most confident label, and
    min_confidence=95 makes the robot stay silent about anything it is
    less than 95% sure of (the caller then plays a "not found" message).
    Returns the ``Labels`` list from the Rekognition response.
    """
    print("Recognizing")
    client = boto3.client("rekognition", region)
    image_ref = {"S3Object": {"Bucket": bucket, "Name": key}}
    result = client.detect_labels(
        Image=image_ref,
        MaxLabels=max_labels,
        MinConfidence=min_confidence,
    )
    return result['Labels']
@route('/cheese')
def cheese():
    """Snap a photo, upload it to S3, label it with Rekognition, and speak the result.

    Returns the raw JPEG bytes so the browser displays the captured frame.
    Bug fixes: ``sys`` and boto3's exception classes were used but never
    imported, so every error path itself crashed with NameError.
    """
    # Local imports: the module top never imported these (original bug).
    import sys
    from botocore.exceptions import BotoCoreError, ClientError

    s3 = boto3.client('s3')
    polly = boto3.client("polly")

    # Timestamped S3 key so successive snapshots never collide.
    now = datetime.datetime.now()
    KEY = now.strftime("%Y%m%d%H%M%S") + ".jpg"
    # S3 bucket name for upload; the object becomes publicly readable at
    # https://<BUCKET>.s3-eu-west-1.amazonaws.com/<KEY>
    BUCKET = "rekogrobot"

    # The handler returns the JPEG itself; mark it as an image and forbid
    # caching so the browser always shows the freshest frame.
    response.content_type = 'image/jpeg'
    response.cache_control = 'no-store'

    # Capture straight from the camera to stdout -- nothing is stored locally.
    with subprocess.Popen(["raspistill", "-w", "400", "-h", "300", "-o", "-"],
                          stdout=subprocess.PIPE) as proc:
        b = proc.stdout.read()
    s3.put_object(ACL='public-read', Bucket=BUCKET, Key=KEY, Body=b)

    labels = detect_labels(BUCKET, KEY)
    if labels:
        # Print and read out the label using Polly (max_labels=1, so at
        # most one iteration in practice).
        for label in labels:
            roundedconfidence = round(label['Confidence'], 2)
            speech = "I see a {Name} and I am ".format(**label)
            speech = speech + str(roundedconfidence) + "% sure."
            print(speech)
            try:
                pollyresponse = polly.synthesize_speech(Text=speech,
                                                        OutputFormat="mp3",
                                                        VoiceId="Salli")
            except (BotoCoreError, ClientError) as error:
                print(error)
                sys.exit(-1)
            if "AudioStream" in pollyresponse:
                with closing(pollyresponse["AudioStream"]) as stream:
                    output = "./speech.mp3"
                    print("Speaking...")
                    try:
                        with open(output, "wb") as file:
                            file.write(stream.read())
                    except IOError as error:
                        print(error)
                        sys.exit(-1)
            else:
                print("Could not stream audio")
                sys.exit(-1)
            # Play the synthesized speech.
            pygame.mixer.init()
            pygame.mixer.music.load(output)
            pygame.mixer.music.set_volume(1.0)
            pygame.mixer.music.play()
            # Uncomment to block until the robot finishes speaking:
            # while pygame.mixer.music.get_busy() == True:
            #     continue
    else:
        # Play a canned message saying the robot couldn't identify anything.
        print("I could not recognize any objects.")
        pygame.mixer.init()
        pygame.mixer.music.load("./notfound_Salli.mp3")
        pygame.mixer.music.set_volume(1.0)
        pygame.mixer.music.play()
    return b
@route('/')
def index():
    """Serve the control UI's landing page from the public/ directory."""
    return static_file('index.html', root='public')
@route('/style.css')
def stylesheet():
    """Serve the UI stylesheet from the public/ directory.

    Renamed from ``index``: the original redefined the '/' handler's
    function name.  Bottle dispatches by the @route decorator, so the
    rename is invisible to clients.
    """
    return static_file('style.css', root='public')
if __name__ == '__main__':
    # Announce readiness audibly before bringing up the web server.
    print("Robot is now ready.")
    pygame.mixer.init()
    pygame.mixer.music.load("./robotready_Salli.mp3")
    pygame.mixer.music.set_volume(1.0)
    pygame.mixer.music.play()

    parser = argparse.ArgumentParser()
    # Server settings
    parser.add_argument("-i", "--host", default=os.getenv('IP', '127.0.0.1'),
                        help="IP Address")
    # type=int also converts a string default taken from the environment,
    # so PORT=8080 in the env no longer yields a str port.
    parser.add_argument("-p", "--port", type=int,
                        default=os.getenv('PORT', 5000), help="Port")
    # Additional hardware
    parser.add_argument("--distance-sensor", help="enable distance sensor",
                        default=False, action="store_true")
    parser.add_argument("--line-sensor", help="enable line sensor",
                        default=False, action="store_true")
    # Verbose mode
    parser.add_argument("--verbose", "-v", help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    try:
        robot = CamJamKitRobot()
        # NOTE(review): the /ultrasonic route reads this sensor; without
        # --distance-sensor that route fails at request time.
        if args.distance_sensor:
            distance_sensor = DistanceSensor(18, 17)
        if args.line_sensor:
            line_sensor = LineSensor(4)
        robot.stop()
    except Exception as e:
        log.error(e)
        exit()

    try:
        app = default_app()
        app.run(host=args.host, port=args.port, server='tornado')
    except Exception:
        # Was a bare ``except:`` -- that also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps Ctrl-C working.
        log.error("Unable to start server on {}:{}".format(args.host, args.port))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment