@MC42, forked from joinAero/camera.py (created June 29, 2019)
Use Kinect with OpenCV (Python)
# common/camera.py
import cv2
import sys


class Camera(object):

    def __init__(self, index=0):
        self.cap = cv2.VideoCapture(index)
        self.openni = index in (cv2.CAP_OPENNI, cv2.CAP_OPENNI2)
        # Keep the measured frame rate in a private attribute so it does not
        # shadow the fps() accessor below.
        self._fps = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def release(self):
        if not self.cap:
            return
        self.cap.release()
        self.cap = None

    def capture(self, callback, gray=True):
        if not self.cap:
            sys.exit('The capture is not ready')

        while True:
            t = cv2.getTickCount()

            if self.openni:
                # OpenNI devices: grab once, then retrieve the depth map and
                # the image channel separately. The channel is passed as the
                # retrieve() flag; on some OpenCV builds it is still not
                # honored (see the issue linked at the end).
                if not self.cap.grab():
                    sys.exit('Grabbing the next frame failed')
                ret, depth = self.cap.retrieve(flag=cv2.CAP_OPENNI_DEPTH_MAP)
                ret, frame = self.cap.retrieve(
                    flag=cv2.CAP_OPENNI_GRAY_IMAGE if gray else cv2.CAP_OPENNI_BGR_IMAGE)
                if callback:
                    callback(frame, depth, self._fps)
            else:
                ret, frame = self.cap.read()
                if not ret:
                    sys.exit('Reading the next frame failed')
                if gray:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                if callback:
                    callback(frame, self._fps)

            t = cv2.getTickCount() - t
            self._fps = cv2.getTickFrequency() / t

            # Exit on Esc or q
            ch = cv2.waitKey(10) & 0xFF
            if ch == 27 or ch == ord('q'):
                break

    def fps(self):
        return self._fps

    def get(self, prop_id):
        return self.cap.get(prop_id)

    def set(self, prop_id, value):
        self.cap.set(prop_id, value)
if __name__ == '__main__':
    callback = lambda gray, fps: cv2.imshow('gray', gray)

    with Camera(0) as cam:
        print("Camera: %dx%d, %d" % (
            cam.get(cv2.CAP_PROP_FRAME_WIDTH),
            cam.get(cv2.CAP_PROP_FRAME_HEIGHT),
            cam.get(cv2.CAP_PROP_FPS)))
        cam.capture(callback)

    cv2.destroyAllWindows()
# common/draw.py
import cv2

FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 1
THICKNESS = 1


class Gravity:
    TOP_LEFT = 1
    TOP_RIGHT = 2
    BOTTOM_LEFT = 3
    BOTTOM_RIGHT = 4


def put_text(img, text, gravity, margin=10):
    h, w = img.shape[:2]
    # getTextSize returns ((width, height), baseline)
    x, y = cv2.getTextSize(text, FONT_FACE, FONT_SCALE, THICKNESS)[0]
    # putText(img, text, org, fontFace, fontScale, color, thickness, lineType)
    org = {
        Gravity.TOP_LEFT: (margin, margin + y),
        Gravity.TOP_RIGHT: (w - margin - x, margin + y),
        Gravity.BOTTOM_LEFT: (margin, h - margin),
        Gravity.BOTTOM_RIGHT: (w - margin - x, h - margin),
    }.get(gravity, (margin, margin + y))
    cv2.putText(img, text, org, FONT_FACE, FONT_SCALE,
                (255, 255, 255), THICKNESS, cv2.LINE_AA)
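
A quick usage sketch for put_text, not part of the original gist: it opens the default webcam with plain cv2.VideoCapture and stamps the frame size and a quit hint in opposite corners.

# overlay_demo.py (illustrative sketch only)
import cv2
from common.draw import Gravity, put_text

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    h, w = frame.shape[:2]
    # Size in the top-left corner, quit hint in the bottom-right corner
    put_text(frame, "%dx%d" % (w, h), Gravity.TOP_LEFT)
    put_text(frame, "press q to quit", Gravity.BOTTOM_RIGHT)
    cv2.imshow('overlay', frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()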
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Kinect/OpenNI2 demo that uses the Camera and put_text helpers above.
import cv2
import numpy as np

from common.camera import Camera
from common.draw import Gravity, put_text


def main():
    def callback(frame, depth, fps):
        # Normalize the depth map to 0-255 for display (guard against a
        # constant depth map, which would give a zero range).
        d_min, d_max = depth.min(), depth.max()
        d_range = (d_max - d_min) or 1
        depth = np.uint8(255 * (depth - d_min) / d_range)

        # Note: on some OpenCV builds the color channel cannot be retrieved
        # correctly, so `frame` may still contain depth data here (see the
        # issue linked below).
        put_text(frame, "{1}x{0}".format(*frame.shape), Gravity.TOP_LEFT)
        put_text(depth, "{1}x{0}".format(*depth.shape), Gravity.TOP_LEFT)
        put_text(depth, "%.1f" % fps, Gravity.TOP_RIGHT)

        cv2.imshow('frame', frame)
        cv2.imshow('depth', depth)

    with Camera(cv2.CAP_OPENNI2) as cam:
        print("Camera: %dx%d, %d" % (
            cam.get(cv2.CAP_OPENNI_IMAGE_GENERATOR + cv2.CAP_PROP_FRAME_WIDTH),
            cam.get(cv2.CAP_OPENNI_IMAGE_GENERATOR + cv2.CAP_PROP_FRAME_HEIGHT),
            cam.get(cv2.CAP_OPENNI_IMAGE_GENERATOR + cv2.CAP_PROP_FPS)))
        cam.capture(callback, False)

    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
# Using Kinect and other OpenNI compatible depth sensors:
# http://docs.opencv.org/master/d7/d6f/tutorial_kinect_openni.html
# OpenCV Python unable to access correct OpenNI device channels:
# https://github.com/opencv/opencv/issues/4735
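
The OpenCV tutorial linked above checks whether the device actually exposes a color image generator before asking for BGR frames. A minimal sketch of that check in Python, assuming OpenCV was built with OpenNI2 support and that the CAP_OPENNI_IMAGE_GENERATOR_PRESENT property behaves as described in that tutorial, could look like this:

# Sketch (not from the gist): probe an OpenNI2 device for an image generator
# before retrieving color frames; fall back to depth-only output otherwise.
import cv2

cap = cv2.VideoCapture(cv2.CAP_OPENNI2)
if not cap.isOpened():
    raise SystemExit('Cannot open the OpenNI2 device')

has_image = cap.get(cv2.CAP_OPENNI_IMAGE_GENERATOR_PRESENT) != 0
print('Image generator present:', has_image)

if cap.grab():
    ok, depth = cap.retrieve(flag=cv2.CAP_OPENNI_DEPTH_MAP)
    if has_image:
        ok, bgr = cap.retrieve(flag=cv2.CAP_OPENNI_BGR_IMAGE)

cap.release()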