@jkjung-avt (last active November 7, 2024)
Capture and display video from either IP CAM, USB webcam, or the Tegra X2/X1 onboard camera.
# --------------------------------------------------------
# Camera sample code for Tegra X2/X1
#
# This program could capture and display video from
# IP CAM, USB webcam, or the Tegra onboard camera.
# Refer to the following blog post for how to set up
# and run the code:
# https://jkjung-avt.github.io/tx2-camera-with-python/
#
# Written by JK Jung <jkjung13@gmail.com>
# --------------------------------------------------------
import sys
import argparse
import subprocess

import cv2


WINDOW_NAME = 'CameraDemo'


def parse_args():
    # Parse input arguments
    desc = 'Capture and display live camera video on Jetson TX2/TX1'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--rtsp', dest='use_rtsp',
                        help='use IP CAM (remember to also set --uri)',
                        action='store_true')
    parser.add_argument('--uri', dest='rtsp_uri',
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554',
                        default=None, type=str)
    parser.add_argument('--latency', dest='rtsp_latency',
                        help='latency in ms for RTSP [200]',
                        default=200, type=int)
    parser.add_argument('--usb', dest='use_usb',
                        help='use USB webcam (remember to also set --vid)',
                        action='store_true')
    parser.add_argument('--vid', dest='video_dev',
                        help='device # of USB webcam (/dev/video?) [1]',
                        default=1, type=int)
    parser.add_argument('--width', dest='image_width',
                        help='image width [1920]',
                        default=1920, type=int)
    parser.add_argument('--height', dest='image_height',
                        help='image height [1080]',
                        default=1080, type=int)
    args = parser.parse_args()
    return args


def open_cam_rtsp(uri, width, height, latency):
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_usb(dev, width, height):
    # We want to set width and height here, otherwise we could just do:
    #     return cv2.VideoCapture(dev)
    gst_str = ('v4l2src device=/dev/video{} ! '
               'video/x-raw, width=(int){}, height=(int){} ! '
               'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_onboard(width, height):
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_window(width, height):
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, width, height)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')


def read_cam(cap):
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the window
            # If yes, terminate the program
            break
        _, img = cap.read()  # grab the next image frame from camera
        if show_help:
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # toggle fullscreen
            full_scrn = not full_scrn
            if full_scrn:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)


def main():
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))
    if args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri,
                            args.image_width,
                            args.image_height,
                            args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev,
                           args.image_width,
                           args.image_height)
    else:  # by default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width,
                               args.image_height)
    if not cap.isOpened():
        sys.exit('Failed to open camera!')
    open_window(args.image_width, args.image_height)
    read_cam(cap)
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
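For reference, the script can be launched like this (assuming it is saved as tegra-cam.py, as in the blog post; the flags map directly to the argparse options above):

    # Jetson onboard camera (the default)
    python3 tegra-cam.py

    # USB webcam at /dev/video1, at 1280x720
    python3 tegra-cam.py --usb --vid 1 --width 1280 --height 720

    # IP camera over RTSP
    python3 tegra-cam.py --rtsp --uri rtsp://192.168.1.64:554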
@han88 commented Jun 11, 2021

Hi, is there a way to use tegra-cam.py on a PC for RTSP streaming? The USB camera works... is there an alternative for omxh264dec, nvvidconv, etc.? Thanks.

@jkjung-avt (Author)

@han88 You might use "avdec_h264". Reference: https://github.com/jkjung-avt/tensorrt_demos/blob/master/utils/camera.py#L66-L72

    elif 'avdec_h264' in gst_elements:
        # Otherwise try to use the software decoder 'avdec_h264'
        # NOTE: in case resizing images is necessary, try adding
        #       a 'videoscale' into the pipeline
        gst_str = ('rtspsrc location={} latency={} ! '
                   'rtph264depay ! h264parse ! avdec_h264 ! '
                   'videoconvert ! appsink').format(uri, latency)
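For a PC, the same idea can be wrapped into a standalone function (a sketch with a hypothetical name, not the exact code in camera.py; it still requires OpenCV built with GStreamer support, plus the GStreamer libav plugin that provides avdec_h264):

    def open_rtsp_software(uri, latency=200):
        # Software H.264 decode for PCs without NVIDIA's omxh264dec/nvvidconv;
        # add a 'videoscale' element if the frames need resizing
        gst_str = ('rtspsrc location={} latency={} ! '
                   'rtph264depay ! h264parse ! avdec_h264 ! '
                   'videoconvert ! appsink').format(uri, latency)
        return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)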

@han88 commented Jun 13, 2021

Hi, thanks. It no longer gives me error messages, but it unfortunately still fails to open the camera, now over USB too... (it worked only once out of 10 times before). Maybe it's Ubuntu 20.04? I can't compile the YOLO examples with TensorRT 8.0 either...

@zahidaMassin

@han88 I had the same error. Just try to uninstall OpenCV and install it again; use jkjung-avt's tutorial to install OpenCV 3.4.6:
https://jkjung-avt.github.io/opencv-on-nano/

@gusarg81

Hi,

I am trying to use this script and I got:

python3 tegra_cam.py --usb --vid 0 --rtsp --uri rtsp://10.0.1.5:554
Called with args:
Namespace(image_height=1080, image_width=1920, rtsp_latency=200, rtsp_uri='rtsp://10.0.1.5:554', use_rtsp=True, use_usb=True, video_dev=0)
OpenCV version: 4.5.2
Failed to open camera!

My camera is a UVC USB module (Arducam B0205) which is working just fine, located in /dev/video0.

Any ideas? Thanks.

@jkjung-avt (Author)

@gusarg81 Since you are using the USB webcam (/dev/video0), you shouldn't have added "--rtsp --uri rtsp://10.0.1.5:554" in the command line.
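For example, with the same script name and device number as in the command above, the invocation would just be:

    python3 tegra_cam.py --usb --vid 0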

@gusarg81

Yeah, sorry, that was a bad paste here. I was in fact running it without --rtsp and --uri. The problem was the OpenCV installation (fixed now).

Also, in my case, since the camera supports both MJPEG and YUYV, the capture with this script was slow (YUYV at 1080p captures at most 5 fps, while MJPEG reaches 30 fps).

To solve this, I used image/jpeg and jpegdec.
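For anyone hitting the same frame-rate limit, such a pipeline could look roughly like this in the script (the caps and framerate here are assumptions, not gusarg81's exact pipeline):

    def open_cam_usb_mjpeg(dev, width, height):
        # Request MJPEG from the camera and decode it with jpegdec,
        # instead of pulling raw YUYV frames
        gst_str = ('v4l2src device=/dev/video{} ! '
                   'image/jpeg, width=(int){}, height=(int){}, '
                   'framerate=(fraction)30/1 ! '
                   'jpegdec ! videoconvert ! appsink').format(dev, width, height)
        return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)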

@kuzmich9023

Hey! I am having a problem running the example. I tried to connect to the IP camera using the RTSP protocol, as in the article. An error popped up in the terminal:
[Screenshot from 2021-08-07 19-49-29]

@kuzmich9023 commented Aug 8, 2021

I entered the command from the post you gave me. What else can I fix to make it work?
[Screenshot from 2021-08-08 09-06-44]

@niteshgaba commented Aug 8, 2021 via email

@kuzmich9023

Can you write an example? I'm just starting to learn Linux...

@glemarivero commented Nov 3, 2021

Hi, is it possible to stream the onboard camera output of the Jetson Nano to 2 virtual camera sinks? I tried doing that with: gst-launch-1.0 -v nvarguscamerasrc ! 'video/x-raw(memory:NVMM), format=NV12, width=1920, height=1080, framerate=30/1' ! nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! videoconvert ! identity drop-allocation=1 ! 'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! v4l2sink device=/dev/video3 v4l2sink device=/dev/video4

but it isn't working. Essentially, I want to mirror my onboard camera stream to the device's web browser, for which I created 2 virtual camera devices, namely /dev/video3 and /dev/video4, but it only streams to a single virtual camera device (this command works in the case of a single sink).

In case @bhavitvyamalik (or anyone else) is still wondering how to achieve this, you need to use tee and queue.

First create the two devices:
sudo modprobe v4l2loopback devices=2

Then launch the pipeline:
gst-launch-1.0 -v nvarguscamerasrc ! 'video/x-raw(memory:NVMM), format=NV12, width=640, height=480, framerate=30/1' ! nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! videoconvert ! identity drop-allocation=1 ! 'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! tee name=t ! queue ! v4l2sink device=/dev/video1 t. ! queue ! v4l2sink device=/dev/video2

@kuzmich9023 commented Nov 3, 2021 via email

@niteshgaba

@jkjung-avt: Could you please help me save the video instead of images?

@jkjung-avt (Author)

@niteshgaba Please refer to:
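As a generic starting point (a sketch only, not necessarily what the referenced material covers), frames read from the capture can be written out with cv2.VideoWriter:

    # Assumes 'cap' is an opened cv2.VideoCapture and that width/height
    # match the actual frame size coming from the camera
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('output.mp4', fourcc, 30.0, (width, height))
    while True:
        ret, img = cap.read()
        if not ret:
            break
        writer.write(img)
    writer.release()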

@niteshgaba commented Dec 2, 2021

@jkjung-avt: Thanks for the links. Do you have any idea about known issues of OpenCV 4.1.1 with GStreamer? If I run the image capture for 5 seconds in a thread, the pipeline closes fine, but if it runs for a large number of seconds, it just hangs and does not close.

@jkjung-avt (Author)

Do you have any idea about known issues of OpenCV 4.1.1 with GStreamer? If I run the image capture for 5 seconds in a thread, the pipeline closes fine, but if it runs for a large number of seconds, it just hangs and does not close.

Sorry. It does not ring a bell for me.

@niteshgaba commented Dec 2, 2021 via email
