Skip to content

Instantly share code, notes, and snippets.

Avatar

Anand P V AdroitAnandAI

View GitHub Profile
@AdroitAnandAI
AdroitAnandAI / licensePlateDetect.py
Created Sep 17, 2021
To detect the license plate rectangle from an image of a car
View licensePlateDetect.py
# Locate candidate license-plate contours in an image of a car.
# Requires OpenCV (cv2) and an image file "car.png" in the working directory.
img = cv2.imread("car.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# NOTE(review): this thresholded `gray` is never used — it is overwritten
# below by the grayscale of `thresh2`; this line looks like dead code.
ret, gray = cv2.threshold(gray, 250,255,0)
# applying different thresholding techniques on the input image
# THRESH_BINARY_INV: pixel values above 120 become 0, all others become 255
ret, thresh2 = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY_INV)
gray = cv2.cvtColor(thresh2, cv2.COLOR_BGR2GRAY)
# RETR_TREE retrieves the full contour hierarchy; CHAIN_APPROX_SIMPLE
# compresses straight contour segments down to their end points.
contours, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
@AdroitAnandAI
AdroitAnandAI / monodepth.py
Last active Sep 16, 2021
Monocular Depth Estimation
View monodepth.py
# Detections contains bounding boxes using object detection model
# Accumulators filled while iterating over the detections (loop below,
# not fully visible in this preview).
boxcount = 0    # number of detections processed so far
depths = []     # per-detection depth estimates
bboxMidXs = []  # presumably bounding-box midpoint x coordinates — verify against the loop
bboxMidYs = []  # presumably bounding-box midpoint y coordinates — verify against the loop
# This is computed to reflect real distance during initial camera calibration
scalingFactor = 1000
# Depth scaling factor is based on one-time cam calibration
for detection in detections:
@AdroitAnandAI
AdroitAnandAI / lidarNode.py
Last active Sep 16, 2021
Node 2 Sensor Fusion
View lidarNode.py
# Sensor Fusion happens at Node 2
def on_message(client, userdata, msg):
word = msg.payload.decode()
# objAttributes contains label,
# theta min and max separated by |
objAttributes = word.split('|')
now = time.localtime()
@AdroitAnandAI
AdroitAnandAI / shapecontext.py
Created Sep 16, 2021
To build and compare shape context
View shapecontext.py
# This code builds the shape context descriptor, which is the core of our alphanumeral comparison
# https://github.com/AdroitAnandAI/Multilingual-Text-Inversion-Detection-of-Scanned-Images
# points represents the edge shape (an (n, d) array of edge-point coordinates)
t_points = len(points)
# getting euclidean distance between every pair of edge points
r_array = cdist(points, points)
# for rotation invariant feature: locate the pair of points farthest apart.
# argmax() on a 2-D array returns a FLAT index into the matrix.
am = r_array.argmax()
# BUG FIX: converting the flat index to a (row, col) pair requires integer
# division. The original `am / t_points` yields a float under Python 3,
# which is not a valid array index. `//` restores the intended behavior
# (equivalent to np.unravel_index(am, r_array.shape)).
max_points = [am // t_points, am % t_points]
# normalizing
# normalizing
@AdroitAnandAI
AdroitAnandAI / detectCircularGesture.py
Last active Sep 16, 2021
To detect the object movement in circular motion
View detectCircularGesture.py
# loop over the set of tracked points, drawing the motion trail on `frame`
for i in range(1, len(pts)):
    # if either of the tracked points are None, ignore them
    if pts[i - 1] is None or pts[i] is None:
        continue
    # otherwise, compute the thickness of the line and draw the connecting lines;
    # older points (larger i) get thinner lines: sqrt(buffer / (i + 1)) * 2.5
    thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
    # draw the segment in red (BGR (0, 0, 255)) on the current frame
    cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
@AdroitAnandAI
AdroitAnandAI / ADASObjDetect.py
Created Sep 16, 2021
To send object detection result to LIDAR node for Sensor Fusion
View ADASObjDetect.py
for detection in detections:
if detection.score > threshold:
class_id = int(detection.id)-1
# Potential Objects: person, bicycle, car, bus,
# truck, traffic light, street sign, stop sign
if class_id not in [0, 1, 2, 3, 5, 7, 9, 11, 12]:
@AdroitAnandAI
AdroitAnandAI / getObjectDistance.py
Created Aug 12, 2021
Find distance to an object
View getObjectDistance.py
def getObjectDistance (angle_min, angle_max):
minDist = 0
lidar = RPLidar(None, PORT_NAME)
try:
for scan in lidar_scans(lidar):
for (_, angle, distance) in scan:
scan_data[min([359, floor(angle)])] = distance
@AdroitAnandAI
AdroitAnandAI / inverseSigmoid.py
Last active Aug 12, 2021
Curve Fitting to identify rise or fall of signal
View inverseSigmoid.py
# Code to fit the inverse sigmoid curve to tail end of signal
def sigmoid(x, L ,x0, k, b):
    """Evaluate a decreasing ("inverse") logistic curve.

    Computes b + L / (1 + exp(k * (x - x0))). With k > 0 the curve
    falls from b + L (for x << x0) down to b (for x >> x0), passing
    through the midpoint b + L / 2 at x = x0.
    """
    shifted = k * (x - x0)
    return b + L / (1 + np.exp(shifted))
def isCurveSigmoid(pixelCounts, count):
try:
xIndex = len(pixelCounts)
p0 = [max(pixelCounts), np.median(xIndex),1,min(pixelCounts)] # this is an mandatory initial guess
popt, pcov = curve_fit(sigmoid, list(range(xIndex)), pixelCounts, p0, method='lm', maxfev=5000)
@AdroitAnandAI
AdroitAnandAI / slamviz-mqtt.py
Created Aug 12, 2021
SLAM visualization with MQTT
View slamviz-mqtt.py
# At the MQTT Transmission side
data2Transmit = np.array([x, y, theta])
# Map which is saved as a bytearray is appended at the end
if scan_count % 30 == 0:
client.publish("safetycam/topic/slamviz", \\
data2Transmit.tobytes() + mapbytes)
# At the MQTT receiving side
View custom_slam.py
## Optimized implementation of Graph SLAM.
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
coefficients = [1, -1, -1, 1]
# initialize the constraints
initial_omega_1, initial_xi_1, initial_omega_2, initial_xi_2 = \\