## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
## OpenCV and NumPy integration ##
###############################################
import time
# First import the RealSense library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
from networktables import NetworkTables
# Initialize Network Tables
# As a client to connect to a robot
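# 127.0.0.1 points at a NetworkTables server on this machine (e.g. a simulator);
# on a real robot this would typically be the roboRIO's address.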
NetworkTables.initialize(server='127.0.0.1')
red_pos_entry = NetworkTables.getEntry('/realsense/pos_red')
blue_pos_entry = NetworkTables.getEntry('/realsense/pos_blue')
latency_entry = NetworkTables.getEntry('/realsense/latency')
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
print("The demo requires Depth camera with Color sensor")
exit(0)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
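# query_sensors()[1] is assumed to be the RGB sensor (index 0 is usually the stereo module).
# Exposure and white balance are locked so the HSV thresholds below stay stable across frames.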
color_sensor = profile.get_device().query_sensors()[1]
color_sensor.set_option(rs.option.enable_auto_exposure, False)
color_sensor.set_option(rs.option.enable_auto_white_balance, False)
color_sensor.set_option(rs.option.exposure, 430)
color_sensor.set_option(rs.option.white_balance, 3500)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
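# depth_scale converts raw 16-bit depth units to meters (0.001 on most D400-series cameras)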
print("Depth Scale is: ", depth_scale)
depth_sensor.set_option(rs.option.laser_power, 360) # Max Laser power
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 6 # 6 meters
clipping_distance = clipping_distance_in_meters / depth_scale
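# e.g. with a 0.001 depth scale, 6 m becomes 6000 raw units, matching depth_image's values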
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
hole_filling = rs.hole_filling_filter(1)
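# Mode 1 = "farest_from_around": holes are filled with the farthest valid depth in the
# pixel's neighborhood, biasing filled pixels toward the background.

# draw_info annotates detected balls on an image and optionally publishes their
# camera-space positions to NetworkTables. It reads depth_frame and
# aligned_depth_intrinsics as globals set in the main loop below.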
def draw_info(contours, color, color_image, nt_entry=None):
    positions = [1]  # leading 1 appears to flag "detections present" (see setValue below)
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 100:
            color_copy = color
            total_depth = 0
            depth_pixels = 0
            total_pixels = 0
            # Average the depth over the contour's pixels, ignoring invalid (zero) readings
            for pixel in contour:
                depth = depth_frame.get_distance(pixel[0][0], pixel[0][1])
                total_pixels += 1
                if depth > 0:
                    total_depth += depth
                    depth_pixels += 1
            # TODO: do some statistical filtering on the depth values to reject outliers
            if depth_pixels > 0:
                avg_depth = total_depth / depth_pixels
                fill = depth_pixels / total_pixels
                x, y, w, h = cv2.boundingRect(contour)
                # skip contours whose bounding box isn't roughly square (a ball projects to ~square)
                w_h_ratio = min(w / h, h / w)
                if w_h_ratio > 0.8:
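                    # rs2_deproject_pixel_to_point maps (pixel, depth) to a camera-space
                    # (X, Y, Z) point in meters using the stream intrinsics. The 0.0889 m
                    # (3.5 in) added to avg_depth appears to push the measured front-surface
                    # depth toward the ball's center.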
                    pos = rs.rs2_deproject_pixel_to_point(aligned_depth_intrinsics, (x + w / 2, y + h / 2), avg_depth + 0.0889)
                    pos_top_left = rs.rs2_deproject_pixel_to_point(aligned_depth_intrinsics, (x, y), avg_depth + 0.0889)
                    pos_bottom_right = rs.rs2_deproject_pixel_to_point(aligned_depth_intrinsics, (x + w, y + h), avg_depth + 0.0889)
                    avg_radius = ((pos_bottom_right[0] - pos_top_left[0]) / 4) + ((pos_bottom_right[1] - pos_top_left[1]) / 4)
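                    # avg_radius is the mean of the box's half-width and half-height in
                    # meters: (dx/2 + dy/2) / 2 = dx/4 + dy/4.
                    # A circle inscribed in its bounding box covers pi/4 ~= 0.785 of it,
                    # so the next test keeps contours whose area is within 10% of a circle's.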
                    if abs((((w * h) * 0.785) / area) - 1) < 0.1:
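                        # Expected real-world radius model: ~0.136 m minus an empirical
                        # 0.013 m-per-meter depth correction; reject detections more than
                        # 1.5 cm off. The constants look tuned for this camera/ball setup.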
                        if abs(avg_radius - 0.136 + (0.013 * pos[2])) < 0.015:
                            cv2.rectangle(color_image, (x, y), (x + w, y + h), color_copy, 2)
                            cv2.putText(color_image, "X{:.2f} Y{:.2f} Z{:.2f} R{:.3f}".format(pos[0], pos[1], pos[2], avg_radius), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1 / avg_depth, color, 2)
                            positions.append(pos[0])
                            positions.append(pos[1])
                            positions.append(pos[2])
    if nt_entry is not None:
        if len(positions) > 1:
            nt_entry.setValue(positions)
            # print(positions)
        else:
            nt_entry.setValue([0])
try:
    while True:
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x480 depth image
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        depth_frame = aligned_frames.get_depth_frame()  # depth_frame is a 640x480 depth image aligned to color
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not depth_frame or not color_frame:
            continue
        filled_depth = hole_filling.process(depth_frame)
        aligned_depth_intrinsics = rs.video_stream_profile(depth_frame.profile).get_intrinsics()
        depth_image = np.asanyarray(depth_frame.get_data())
        filled_depth_image = np.asanyarray(filled_depth.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Remove background - set pixels further than clipping_distance to black
        back_color = 0
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), back_color, color_image)
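        # (bg_removed is computed but not used further below)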
        # Render the depth image as a colormap for visualization
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_HSV)
        depth_gray = cv2.convertScaleAbs(filled_depth_image, alpha=0.05)
        edge = cv2.Canny(depth_gray, 10, 200, apertureSize=5, L2gradient=False)  # Use the depth to determine the edges between objects
        cv2.imshow('depth edges', cv2.bitwise_or(depth_gray, edge))
        edge = cv2.dilate(edge, None, iterations=1)
        edge = cv2.bitwise_not(edge)  # Invert the edges so that we can use them as a mask
        blurred = cv2.GaussianBlur(color_image, (11, 11), 0)
        blurred = cv2.bitwise_and(blurred, blurred, mask=edge)  # ensure there is at least a 1-pixel gap between objects at different depths
        cv2.imshow("Blurred Masked", blurred)
        img_hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
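        # OpenCV hue runs 0-179, so blue is a single band (~87-100) while red wraps
        # around 0 and needs two bands (0-15 and 175-180, merged below)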
        img_threshold_blue = cv2.inRange(img_hsv, (87, 150, 10), (100, 255, 255))
        # Erode then dilate (morphological opening) to drop speckle noise
        img_threshold_blue = cv2.erode(img_threshold_blue, None, iterations=2)
        img_threshold_blue = cv2.dilate(img_threshold_blue, None, iterations=2)
        min_sat_red = 150
        max_sat_red = 255
        min_val_red = 40
        max_val_red = 255
        img_threshold_red_1 = cv2.inRange(img_hsv, (0, min_sat_red, min_val_red), (15, max_sat_red, max_val_red))
        img_threshold_red_2 = cv2.inRange(img_hsv, (175, min_sat_red, min_val_red), (180, max_sat_red, max_val_red))
        # Merge the masks to get all red regions
        img_threshold_red = cv2.bitwise_or(img_threshold_red_1, img_threshold_red_2)
        img_threshold_red = cv2.erode(img_threshold_red, None, iterations=2)
        img_threshold_red = cv2.dilate(img_threshold_red, None, iterations=2)
        # img_threshold_red = cv2.erode(img_threshold_red, None, iterations=3)
        # img_threshold_red = cv2.dilate(img_threshold_red, None, iterations=3)
        cv2.imshow('Blue balls', img_threshold_blue)
        cv2.imshow('Red balls', img_threshold_red)
        blue_contours, blue_hierarchy = cv2.findContours(img_threshold_blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(depth_colormap, blue_contours, -1, (255, 0, 0), 3)
        cv2.drawContours(color_image, blue_contours, -1, (255, 0, 0), 3)
        red_contours, red_hierarchy = cv2.findContours(img_threshold_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(depth_colormap, red_contours, -1, (0, 0, 255), 3)
        cv2.drawContours(color_image, red_contours, -1, (0, 0, 255), 3)
        draw_info(red_contours, (0, 0, 255), depth_colormap, nt_entry=red_pos_entry)
        draw_info(blue_contours, (255, 0, 0), depth_colormap, nt_entry=blue_pos_entry)
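        # frames.get_timestamp() is in milliseconds; comparing it to time.time() * 1000
        # assumes the frame timestamp is in the system-clock (global time) domain, which
        # librealsense uses by default on supported devices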
        latency = time.time() * 1000 - frames.get_timestamp()
        latency_entry.setValue([latency])
        print("latency: {}".format(latency))
        NetworkTables.flush()
        draw_info(blue_contours, (255, 0, 0), color_image)
        draw_info(red_contours, (0, 0, 255), color_image)
        cv2.imshow('Contours', color_image)
        cv2.imshow('depth', depth_colormap)
        cv2.waitKey(1)
finally:
    # Stop streaming
    pipeline.stop()