from agora_community_sdk import AgoraRTC
client = AgoraRTC.create_watcher("app-id", "chromedriver link")
client.join_channel("channel-name")
users = client.get_users() # Gets references to everyone participating in the call
user1 = users[0] # reference the broadcasting user
binary_image = user1.frame # Gets the latest frame from the stream as a PIL image
binary_image.save("link to in.png file(high resolution)")
def detect(im, param_vals):
    """
    Detect number plates in an image.

    :param im:
        Image to detect number plates in.

    :param param_vals:
        Model parameters to use. These are the parameters output by the `train`
        module.

    :returns:
        Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
        top-left and bottom-right corners, and the per-letter probabilities.
    """
    # (Function body omitted in the gist preview.)

def letter_probs_to_code(letter_probs):
    return "".join(common.CHARS[i] for i in numpy.argmax(letter_probs, axis=1))
if __name__ == "__main__":
im = cv2.imread("link to in.png file")
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) / 255.
f = numpy.load("link to weights.npz file")
param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
print ("RUN")
for pt1, pt2, present_prob, letter_probs in post_process(
detect(im_gray, param_vals)):
pt1 = tuple(reversed(map(int, pt1)))
pt2 = tuple(reversed(map(int, pt2)))
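        # Not in the gist: a plausible continuation that draws each detection
        # on the colour image and writes an annotated copy out.
        code = letter_probs_to_code(letter_probs)
        cv2.rectangle(im, pt1, pt2, (0, 0, 255), 2)
        cv2.putText(im, code, pt1, cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
    cv2.imwrite("out.png", im)  # hypothetical output path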
def _overlaps(match1, match2):
    bbox_tl1, bbox_br1, _, _ = match1
    bbox_tl2, bbox_br2, _, _ = match2
    return (bbox_br1[0] > bbox_tl2[0] and
            bbox_br2[0] > bbox_tl1[0] and
            bbox_br1[1] > bbox_tl2[1] and
            bbox_br2[1] > bbox_tl1[1])

def _group_overlapping_rectangles(matches):
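    # The gist ends at the signature above; what follows is a hedged sketch of
    # one plausible body, not the author's original code: assign each match to
    # a numbered group, merging matches whose bounding boxes overlap.
    matches = list(matches)
    match_to_group = {}
    num_groups = 0
    for idx1 in range(len(matches)):
        for idx2 in range(idx1):
            if _overlaps(matches[idx1], matches[idx2]):
                match_to_group[idx1] = match_to_group[idx2]
                break
        else:
            match_to_group[idx1] = num_groups
            num_groups += 1

    groups = {}
    for idx, group_idx in match_to_group.items():
        groups.setdefault(group_idx, []).append(matches[idx])
    return list(groups.values())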
shriyaRam / part1.py (last active March 13, 2021): Gesture recognition
import pyttsx3
from predict import *
from agora_community_sdk import AgoraRTC

client = AgoraRTC.create_watcher("ab8078b5b58e4835af33a37580dbc864", "chromedriver.exe")
client.join_channel("test")
users = client.get_users()  # Gets references to everyone participating in the call
user1 = users[0]  # The first user in the list is the broadcaster
binary_image = user1.frame  # Latest frame from the stream as a PIL image
binary_image.save("ASL.jpg")  # PIL images are written with .save(), not open()/f.write()
shriyaRam / part2.py (created January 16, 2020): Gesture Recognition
# Initialise the text-to-speech engine
engine = pyttsx3.init()
engine.setProperty('rate', 105)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)  # 'voice' expects a voice id string, not an index
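Voice ids differ across platforms, so it can help to list what pyttsx3 actually finds installed before choosing one; a small hedged sketch:

import pyttsx3

# Print every installed voice so a valid id/index can be chosen above.
for v in pyttsx3.init().getProperty('voices'):
    print(v.id, v.name)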
def withoutSkinSegment():
    window_name = "ASL"
    frame_height, frame_width, roi_height, roi_width = 480, 900, 200, 200
    cap = cv2.VideoCapture("ASL.jpg")  # OpenCV will also "capture" from a still image
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    x_start, y_start = 100, 100
    sentence = ""
    while True:
        ret, frame = cap.read()
        if not ret:  # cap.read() returns a bool flag, never None
            break
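        # The gist truncates here; a hedged sketch of a typical remainder,
        # assuming predict.py's pre_process()/which() classify the ROI:
        cv2.rectangle(frame, (x_start, y_start),
                      (x_start + roi_width, y_start + roi_height), (0, 255, 0), 2)
        roi = frame[y_start:y_start + roi_height, x_start:x_start + roi_width]
        cv2.imshow(window_name, frame)
        c = cv2.waitKey(1) & 0xFF
        if c == 27:  # Esc quits
            break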
shriyaRam / keypress.py (created January 16, 2020): Gesture Recognition
# Speak the sentence
if len(sentence) > 0 and c == ord('s'):
engine.say(sentence)
engine.runAndWait()
# Clear the sentence
if c == ord('c') or c == ord('C'):
sentence = ""
# Delete the last character
if c == ord('d') or c == ord('D'):
sentence = sentence[:-1]
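For context, `c` in this fragment is presumably the key code polled from OpenCV each frame; a minimal hedged sketch of the driver loop these branches live in:

sentence = ""
while True:
    # ... capture, classify, and display the current frame here ...
    c = cv2.waitKey(1) & 0xFF  # key pressed in the OpenCV window, if any
    if c == 27:  # Esc quits
        break
    # the 's'/'c'/'d' handling shown above runs here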
"""
Contains functions : pre_process() and which() that are needed by translator.py for predicting image from webcam
"""
import cv2
import numpy as np
from variables import *
from keras.models import load_model
# Loads pretrained CNN Model from MODEL_PATH
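The snippet cuts off at the comment; presumably the next line performs the load itself, with MODEL_PATH assumed to be defined in variables.py:

model = load_model(MODEL_PATH)  # MODEL_PATH is assumed to come from variables.py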