# https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_tutorials.html
# Contains Enumerations for ONI devices
# http://docs.ros.org/kinetic/api/libfreenect/html/OniCEnums_8h.html#ac214498322fe73b8c454335aeb7f289f
# List of mat types in opencv
# http://ninghang.blogspot.com/2012/11/list-of-mat-type-in-opencv.html
import sys
from datetime import datetime
import numpy as np
import cv2
from openni import openni2
from openni import _openni2 as c_api
openni2.initialize("C:/Dev/Downloads/Orbbec/OpenNI_2.3.0.43/Windows/OpenNI-Windows-x64-2.3/Redist")
if openni2.is_initialized():
    print("Initialized")
else:
    print("Something went wrong")
device = openni2.Device.open_any()
if device.get_depth_color_sync_enabled():
    print('depth_color_sync is enabled')
else:
    print('depth_color_sync is disabled')
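# Aside (not in the original gist): if sync is reported as disabled, the bindings appear to
# expose a setter alongside get_depth_color_sync_enabled(); the exact method name may differ
# between wrapper versions, so treat this as a hedged sketch and verify against your bindings.
#if not device.get_depth_color_sync_enabled():
#    device.set_depth_color_sync_enabled(True)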
device_info = device.get_device_info()
print('Vendor: ', device_info.vendor.decode("utf-8"))
print('Name: ', device_info.name.decode("utf-8"))
print('InfraRed sensor info')
ir_sensor_info = device.get_sensor_info(c_api.OniSensorType.ONI_SENSOR_IR)
for row in ir_sensor_info.videoModes:
    print(row)
print('Depth sensor info')
depth_sensor_info = device.get_sensor_info(c_api.OniSensorType.ONI_SENSOR_DEPTH)
for row in depth_sensor_info.videoModes:
    print(row)
# It seems we can only obtain depth and IR streams;
# color streams don't seem to be available... no idea why
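# Sketch (not in the original gist): one way to confirm whether a color stream is exposed at
# all is to try creating one and catch the failure. create_color_stream() mirrors the
# create_ir_stream() call used below; the exact exception raised may vary by wrapper version.
try:
    color_stream = device.create_color_stream()
    print('color stream could be created after all')
except Exception as error:
    print('color stream not available: ', error)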
ir_stream = device.create_ir_stream()
# Read a 16-bit grayscale image at the given resolution
# (the loop below reads the buffer as uint16, so GRAY16 matches; RGB888 would deliver 3 bytes per pixel)
ir_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_GRAY16, resolutionX = 320, resolutionY = 240, fps = 30))
#ir_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX = 320, resolutionY = 240, fps = 30))
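# Alternative (not in the original gist): instead of hard-coding a mode, one of the modes
# enumerated above via ir_sensor_info.videoModes could be passed through directly, e.g.:
#ir_stream.set_video_mode(ir_sensor_info.videoModes[0])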
ir_stream.start()
while True:
    # http://docs.ros.org/kinetic/api/libfreenect/html/classopenni_1_1VideoFrameRef.html
    # Returns a VideoFrameRef
    frame = ir_stream.read_frame()
    # Display the frame index we are currently at
    print('FrameIndex: ', frame.frameIndex)
    # Returns a 1-dim c_ushort array of length 76,800 = 320x240, each pixel a uint16 value 0..65535
    frame_data = frame.get_buffer_as_uint16()
    # Convert to a numpy.ndarray, which is still 1-dimensional
    img = np.frombuffer(frame_data, dtype=np.uint16)
    # Reshape into a 3-dimensional array of shape (1, 240, 320)
    temp = img.reshape(1, 240, 320)
    # Repeat the single channel three times along the first axis => (3, 240, 320)
    temp = np.concatenate((temp, temp, temp), axis=0)
    # Mat needs to be in the format mat(height, width, channels)
    # swap axis 0 (channels, 3) with axis 2 (width, 320) => (320, 240, 3)
    temp = np.swapaxes(temp, 0, 2)
    # swap axis 0 (width, 320) with axis 1 (height, 240) => (240, 320, 3)
    temp = np.swapaxes(temp, 0, 1)
    # Display the image (note: imshow scales 16-bit values down by 256, so the IR image may look dark)
    cv2.imshow("image", temp)
    # Quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
ir_stream.stop()
openni2.unload()
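# Sketch (not in the original gist): the comments above note that depth streams can be obtained
# as well. The same pattern should carry over with create_depth_stream() and a depth pixel
# format such as ONI_PIXEL_FORMAT_DEPTH_1_MM (see the OniCEnums.h link at the top); the exact
# modes supported depend on the device, so treat this as a hedged outline:
#depth_stream = device.create_depth_stream()
#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX = 320, resolutionY = 240, fps = 30))
#depth_stream.start()
#frame = depth_stream.read_frame()
#depth = np.frombuffer(frame.get_buffer_as_uint16(), dtype=np.uint16).reshape(240, 320)
#depth_stream.stop()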