Skip to content

Instantly share code, notes, and snippets.

@saching13
Last active January 14, 2022 16:04
Show Gist options
  • Save saching13/1ebbf5e6cb8a7ec8986c8ec8dc624fd8 to your computer and use it in GitHub Desktop.
import depthai as dai
import numpy as np
import cv2
import queue
# Build the DepthAI pipeline: RGB camera -> host ("video" stream), plus a
# host -> camera control stream used to push manual-exposure commands.
pipeline = dai.Pipeline()

# Nodes
camRgb = pipeline.create(dai.node.ColorCamera)
xoutVideo = pipeline.create(dai.node.XLinkOut)
camControlIn = pipeline.create(dai.node.XLinkIn)

# Stream names referenced by the host-side queues later on
xoutVideo.setStreamName("video")
camControlIn.setStreamName('camControl')

# Camera configuration: 1080p RGB sensor, 10 FPS
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setVideoSize(1920, 1080)
camRgb.setFps(10)

# Device-side output queue: drop-oldest (non-blocking), up to 10 frames
xoutVideo.input.setBlocking(False)
xoutVideo.input.setQueueSize(10)

# Wiring: control messages into the camera, video frames out to the host
camControlIn.out.link(camRgb.inputControl)
camRgb.video.link(xoutVideo.input)
# Exposure-time presets (microseconds, per DepthAI's manual exposure API)
# and ISO presets, selected with the 'a' / 's' / 'd' keys in the loop below.
expTimeA, sensIsoA = 100000, 400  # key 'a': longest exposure
expTimeS, sensIsoS = 80000, 400   # key 's': medium exposure
expTimeD, sensIsoD = 40000, 700   # key 'd': shortest exposure, higher ISO

# Currently requested manual exposure / ISO
expTime = 20000
sensIso = 100

# One-shot flags raised by key presses and consumed inside the loop
setExposure = False
setCapture = False

# Three bracketed frames plus their exposure times feed the HDR merge
q = queue.Queue(3)
exp = queue.Queue(3)
with dai.Device(pipeline) as device:
    # Host-side queues: keep only the latest video frame; control input queue.
    video = device.getOutputQueue(name="video", maxSize=1, blocking=False)
    controlQueue = device.getInputQueue('camControl')
    while True:
        videoIn = video.get()  # blocks until the next frame arrives
        cv2.imshow("video", videoIn.getCvFrame())

        # Apply a pending manual-exposure request (raised by the a/s/d keys).
        if setExposure:
            print('Setting expsure -> {} and ISO -> {}'.format(expTime, sensIso))
            ctrl = dai.CameraControl()
            ctrl.setManualExposure(expTime, sensIso)
            controlQueue.send(ctrl)
            setExposure = False

        # Stash the current frame with the currently requested exposure.
        # NOTE(review): the frame shown this iteration may predate the last
        # exposure change -- there is no latency compensation here; confirm.
        if setCapture:
            q.put(videoIn.getCvFrame())
            exp.put(expTime)
            setCapture = False

        # Once three bracketed frames are collected, merge them into HDR.
        if q.full():
            img_list = list(q.queue)
            # NOTE(review): OpenCV's Debevec merge expects exposure times in
            # seconds, while these values are DepthAI microseconds -- confirm
            # and divide by 1e6 if the absolute radiance scale matters.
            exp_times = np.array(list(exp.queue), dtype=np.float32)
            print(exp_times)
            # Drain both queues for the next bracket (Queue has no clear();
            # lock the internal mutex while touching the underlying deque).
            with exp.mutex:
                exp.queue.clear()
            with q.mutex:
                q.queue.clear()

            # Debevec HDR merge followed by a gamma tonemap.
            # (Removed an unused Robertson merge the original computed and
            # then never displayed or saved.)
            merge_debevec = cv2.createMergeDebevec()
            hdr_debevec = merge_debevec.process(img_list, times=exp_times.copy())
            tonemap1 = cv2.createTonemap(gamma=2.2)
            res_debevec = tonemap1.process(hdr_debevec.copy())

            # Exposure fusion using Mertens (needs no exposure times).
            merge_mertens = cv2.createMergeMertens()
            res_mertens = merge_mertens.process(img_list)

            # Convert to 8-bit for display.  Fix: the original displayed the
            # raw float32 tonemap output ('res_debevec') while the 8-bit
            # conversion it computed ('res_debevec_8bit') was never used.
            res_debevec_8bit = np.clip(res_debevec * 255, 0, 255).astype('uint8')
            res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype('uint8')
            cv2.imshow('res_debevec', res_debevec_8bit)
            cv2.imshow('res_mertens_8bit', res_mertens_8bit)

        # Keyboard handling: q quits, a/s/d select an exposure preset,
        # c captures the current frame into the HDR bracket.
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('a'):
            print('Getting Key a')
            expTime = expTimeA
            sensIso = sensIsoA
            setExposure = True
        elif key == ord('s'):
            print('Getting Key s')
            expTime = expTimeS
            sensIso = sensIsoS
            setExposure = True
        elif key == ord('d'):
            print('Getting Key d')
            expTime = expTimeD
            sensIso = sensIsoD
            setExposure = True
        elif key == ord('c'):
            print('Capturing')
            setCapture = True
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment