Skip to content

Instantly share code, notes, and snippets.

@Erol444
Last active May 7, 2023 20:04
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Erol444/25f374fa18efa7939ec9bb848b39249a to your computer and use it in GitHub Desktop.
DepthAI OAK Luxonis - RGB depth alignment with 12MP color stream
#!/usr/bin/env python3
import cv2
import numpy as np
import depthai as dai
# Blend weights for the rgb/depth overlay (always kept summing to 1.0);
# mutated at runtime by the trackbar callback below.
rgbWeight, depthWeight = 0.4, 0.6
def updateBlendWeights(percent_rgb):
    """Trackbar callback: set the global rgb/depth blend weights.

    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global rgbWeight, depthWeight
    rgbWeight = percent_rgb / 100.0
    depthWeight = 1.0 - rgbWeight
# NOTE(review): downscaleColor is unused in this 12MP variant; kept only for
# parity with the official depthai rgb_depth_aligned example it derives from.
downscaleColor = True
fps = 10
# The disparity is computed at this resolution, then upscaled to RGB resolution
monoResolution = dai.MonoCameraProperties.SensorResolution.THE_800_P

# Create pipeline
pipeline = dai.Pipeline()
device = dai.Device()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
rgbOut = pipeline.create(dai.node.XLinkOut)
depthOut = pipeline.create(dai.node.XLinkOut)

rgbOut.setStreamName("rgb")
depthOut.setStreamName("depth")

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP)  # 4056x3040

# Lock the RGB focus to the calibrated lens position so the color frame
# stays geometrically consistent with the depth alignment.
# (Original wrapped this in `try: ... except: raise`, which is a no-op
# bare-except; exceptions propagate identically without it.)
calibData = device.readCalibration2()
lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB)
if lensPosition:
    camRgb.initialControl.setManualFocus(lensPosition)

left.setResolution(monoResolution)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setFps(fps)
right.setResolution(monoResolution)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setFps(fps)

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# LR-check is required for depth alignment
stereo.setLeftRightCheck(True)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
# Depth is aligned to the 12MP (4056x3040) RGB sensor, then emitted at a
# manageable 4:3 output size (4056x3040 scaled down to 1248x936).
stereo.setOutputSize(1248, 936)

# Linking
camRgb.isp.link(rgbOut.input)
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.depth.link(depthOut.input)
# Connect to device and start pipeline
with device:
    device.startPipeline(pipeline)

    frameRgb = None
    depthFrame = None

    # Configure windows; trackbar adjusts blending ratio of rgb/depth
    rgbWindowName = "rgb"
    depthWindowName = "depth"
    blendedWindowName = "rgb-depth"
    cv2.namedWindow(rgbWindowName)
    cv2.namedWindow(depthWindowName)
    cv2.namedWindow(blendedWindowName)
    cv2.createTrackbar('RGB Weight %', blendedWindowName, int(rgbWeight*100), 100, updateBlendWeights)

    while True:
        # Keep only the newest packet from each stream that signalled an event
        latestPacket = {"rgb": None, "depth": None}
        for queueName in device.getQueueEvents(("rgb", "depth")):
            packets = device.getOutputQueue(queueName).tryGetAll()
            if packets:
                latestPacket[queueName] = packets[-1]

        if latestPacket["rgb"] is not None:
            frameRgb = latestPacket["rgb"].getCvFrame()
            # Downscale the 12MP frame to the stereo output size for display/blending
            frameRgb = cv2.resize(frameRgb, (1248, 936), interpolation=cv2.INTER_NEAREST)
            cv2.imshow(rgbWindowName, frameRgb)

        if latestPacket["depth"] is not None:
            depthFrame = latestPacket["depth"].getFrame()
            # Normalize to 8-bit, equalize for contrast, then false-colorize
            depthFrame = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
            depthFrame = cv2.equalizeHist(depthFrame)
            depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_HOT)
            cv2.imshow(depthWindowName, depthFrame)

        # Blend once both frames have been received
        if frameRgb is not None and depthFrame is not None:
            # Both frames must be BGR before blending
            if len(depthFrame.shape) < 3:
                depthFrame = cv2.cvtColor(depthFrame, cv2.COLOR_GRAY2BGR)
            blended = cv2.addWeighted(frameRgb, rgbWeight, depthFrame, depthWeight, 0)
            cv2.imshow(blendedWindowName, blended)
            frameRgb = None
            depthFrame = None

        if cv2.waitKey(1) == ord('q'):
            break
#!/usr/bin/env python3
import cv2
import numpy as np
import depthai as dai
# Blend weights for the rgb/disparity overlay (always kept summing to 1.0);
# mutated at runtime by the trackbar callback below.
rgbWeight, depthWeight = 0.4, 0.6
def updateBlendWeights(percent_rgb):
    """Trackbar callback: set the global rgb/depth blend weights.

    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global rgbWeight, depthWeight
    rgbWeight = percent_rgb / 100.0
    depthWeight = 1.0 - rgbWeight
# NOTE(review): downscaleColor is unused in this 12MP variant; kept only for
# parity with the official depthai rgb_depth_aligned example it derives from.
downscaleColor = True
fps = 10
# The disparity is computed at this resolution, then upscaled to RGB resolution
monoResolution = dai.MonoCameraProperties.SensorResolution.THE_800_P

# Create pipeline
pipeline = dai.Pipeline()
device = dai.Device()
queueNames = []

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
rgbOut = pipeline.create(dai.node.XLinkOut)
disparityOut = pipeline.create(dai.node.XLinkOut)

rgbOut.setStreamName("rgb")
queueNames.append("rgb")
disparityOut.setStreamName("disp")
queueNames.append("disp")

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP)  # 4056x3040

# Lock the RGB focus to the calibrated lens position so the color frame
# stays geometrically consistent with the depth alignment.
# (Original wrapped this in `try: ... except: raise`, which is a no-op
# bare-except; exceptions propagate identically without it.)
calibData = device.readCalibration2()
lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB)
if lensPosition:
    camRgb.initialControl.setManualFocus(lensPosition)

left.setResolution(monoResolution)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setFps(fps)
right.setResolution(monoResolution)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setFps(fps)

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# LR-check is required for depth alignment
stereo.setLeftRightCheck(True)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
# Disparity is aligned to the 12MP (4056x3040) RGB sensor, then emitted at a
# manageable 4:3 output size (4056x3040 scaled down to 1248x936).
stereo.setOutputSize(1248, 936)

# Linking
camRgb.isp.link(rgbOut.input)
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.disparity.link(disparityOut.input)
# Connect to device and start pipeline
with device:
    device.startPipeline(pipeline)

    frameRgb = None
    frameDisp = None

    # Configure windows; trackbar adjusts blending ratio of rgb/depth
    rgbWindowName = "rgb"
    depthWindowName = "depth"
    blendedWindowName = "rgb-depth"
    cv2.namedWindow(rgbWindowName)
    cv2.namedWindow(depthWindowName)
    cv2.namedWindow(blendedWindowName)
    cv2.createTrackbar('RGB Weight %', blendedWindowName, int(rgbWeight*100), 100, updateBlendWeights)

    # Loop-invariant: max disparity depends only on the stereo config,
    # so compute it once instead of every frame.
    maxDisparity = stereo.initialConfig.getMaxDisparity()
    # Visualisation toggles (previously hard-coded `if 1:` blocks)
    extendDisparityRange = True  # map 0..maxDisparity -> 0..255 for better visualisation
    applyFalseColor = True       # apply false colorization

    while True:
        # Keep only the newest packet from each stream that signalled an event
        latestPacket = {"rgb": None, "disp": None}
        for queueName in device.getQueueEvents(("rgb", "disp")):
            packets = device.getOutputQueue(queueName).tryGetAll()
            if packets:
                latestPacket[queueName] = packets[-1]

        if latestPacket["rgb"] is not None:
            frameRgb = latestPacket["rgb"].getCvFrame()
            # Downscale the 12MP frame to the stereo output size for display/blending
            frameRgb = cv2.resize(frameRgb, (1248, 936), interpolation=cv2.INTER_NEAREST)
            cv2.imshow(rgbWindowName, frameRgb)

        if latestPacket["disp"] is not None:
            frameDisp = latestPacket["disp"].getFrame()
            if extendDisparityRange:
                frameDisp = (frameDisp * 255. / maxDisparity).astype(np.uint8)
            if applyFalseColor:
                frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_HOT)
            frameDisp = np.ascontiguousarray(frameDisp)
            cv2.imshow(depthWindowName, frameDisp)

        # Blend once both frames have been received
        if frameRgb is not None and frameDisp is not None:
            # Both frames must be BGR before blending
            if len(frameDisp.shape) < 3:
                frameDisp = cv2.cvtColor(frameDisp, cv2.COLOR_GRAY2BGR)
            blended = cv2.addWeighted(frameRgb, rgbWeight, frameDisp, depthWeight, 0)
            cv2.imshow(blendedWindowName, blended)
            frameRgb = None
            frameDisp = None

        if cv2.waitKey(1) == ord('q'):
            break
@Erol444
Copy link
Author

Erol444 commented Apr 4, 2023

@mamoonik I just updated the gist so it contains both depth and disparity options.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment