Capture and display video from an IP CAM, USB webcam, or the Tegra X2/X1 onboard camera.
# --------------------------------------------------------
# Camera sample code for Tegra X2/X1
#
# This program could capture and display video from
# IP CAM, USB webcam, or the Tegra onboard camera.
# Refer to the following blog post for how to set up
# and run the code:
# https://jkjung-avt.github.io/tx2-camera-with-python/
#
# Written by JK Jung <jkjung13@gmail.com>
# --------------------------------------------------------

import sys
import argparse
import subprocess

import cv2

WINDOW_NAME = 'CameraDemo'


def parse_args():
    # Parse input arguments
    desc = 'Capture and display live camera video on Jetson TX2/TX1'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--rtsp', dest='use_rtsp',
                        help='use IP CAM (remember to also set --uri)',
                        action='store_true')
    parser.add_argument('--uri', dest='rtsp_uri',
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554',
                        default=None, type=str)
    parser.add_argument('--latency', dest='rtsp_latency',
                        help='latency in ms for RTSP [200]',
                        default=200, type=int)
    parser.add_argument('--usb', dest='use_usb',
                        help='use USB webcam (remember to also set --vid)',
                        action='store_true')
    parser.add_argument('--vid', dest='video_dev',
                        help='device # of USB webcam (/dev/video?) [1]',
                        default=1, type=int)
    parser.add_argument('--width', dest='image_width',
                        help='image width [1920]',
                        default=1920, type=int)
    parser.add_argument('--height', dest='image_height',
                        help='image height [1080]',
                        default=1080, type=int)
    args = parser.parse_args()
    return args


def open_cam_rtsp(uri, width, height, latency):
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_usb(dev, width, height):
    # We want to set width and height here, otherwise we could just do:
    #     return cv2.VideoCapture(dev)
    gst_str = ('v4l2src device=/dev/video{} ! '
               'video/x-raw, width=(int){}, height=(int){} ! '
               'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_onboard(width, height):
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_window(width, height):
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, width, height)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')


def read_cam(cap):
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the window.
            # If yes, terminate the program.
            break
        _, img = cap.read()  # grab the next image frame from camera
        if show_help:
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # toggle fullscreen
            full_scrn = not full_scrn
            if full_scrn:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)


def main():
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))

    if args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri,
                            args.image_width,
                            args.image_height,
                            args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev,
                           args.image_width,
                           args.image_height)
    else:  # by default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width,
                               args.image_height)

    if not cap.isOpened():
        sys.exit('Failed to open camera!')

    open_window(args.image_width, args.image_height)
    read_cam(cap)

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
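For reference, typical invocations of the script look like the lines below; the RTSP address and the USB device number are placeholders and should be replaced with your own:

$ python3 tegra-cam.py                                           # Jetson onboard camera (default)
$ python3 tegra-cam.py --usb --vid 1 --width 1280 --height 720   # USB webcam at /dev/video1
$ python3 tegra-cam.py --rtsp --uri rtsp://192.168.1.64:554 --latency 200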
@raaka1 commented Apr 24, 2019

~/src/Shepherd$ sudo python tegra-cam.py --rtsp --uri rtsp://admin:123456@192.168.10.57/stream0
[sudo] password for ravi:
Called with args:
Namespace(image_height=1080, image_width=1920, rtsp_latency=200, rtsp_uri='rtsp://admin:123456@192.168.10.57/stream0', use_rtsp=True, use_usb=False, video_dev=1)
OpenCV version: 3.4.3

(python:7703): GStreamer-CRITICAL **: 15:46:08.082: gst_element_get_state: assertion 'GST_IS_ELEMENT (element)' failed
Failed to open camera!

@jkjung-avt (author) commented May 15, 2019

Run the following from a command line to make sure the RTSP source is working first:

$ gst-launch-1.0 rtspsrc location=rtsp://admin:123456@192.168.10.57/stream0 latency=2000 ! rtph264depay ! h264parse ! omxh264dec ! nveglglessink
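If that pipeline plays correctly, the same source should also work with the script; for example (URI and latency mirror the test command above):

$ python tegra-cam.py --rtsp --uri rtsp://admin:123456@192.168.10.57/stream0 --latency 2000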

@wangtiancai commented Jun 21, 2019

Hello, I get the following error:

(python3.5:5917): GStreamer-CRITICAL **: gst_element_make_from_uri: assertion 'gst_uri_is_valid (uri)' failed

Available Sensor modes :
2592 x 1944 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
2592 x 1458 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
1280 x 720 FR=120.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10

NvCameraSrc: Trying To Set Default Camera Resolution. Selected sensorModeIndex = 1 WxH = 2592x1458 FrameRate = 30.000000 ...

OpenCV Error: Unspecified error (GStreamer: unable to start pipeline
) in cvCaptureFromCAM_GStreamer, file /home/nvidia/opencv-3.4.0/modules/videoio/src/cap_gstreamer.cpp, line 890
VIDEOIO(cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename)): raised OpenCV exception:

/home/nvidia/opencv-3.4.0/modules/videoio/src/cap_gstreamer.cpp:890: error: (-2) GStreamer: unable to start pipeline
in function cvCaptureFromCAM_GStreamer
Do you know why this problem happens?

@wangtiancai commented Jun 21, 2019

Hello, I cannot solve this problem. Can you help me fix it? (The error log is the same as in my previous comment.)

@wangtiancai commented Jun 21, 2019

open_cam_onboard() fails; the onboard camera cannot be opened.

@jkjung-avt (author) commented Jun 23, 2019

@wangtiancai, are you using a Jetson TX2 with JetPack-3.3 or 3.2.1? Could you check whether your TX2 on-board camera works from command-line tests first? Please refer to: https://developer.nvidia.com/embedded/dlc/l4t-accelerated-gstreamer-guide-28-2-ga

For example (page 21 in the above document):

gst-launch-1.0 nvcamerasrc fpsRange="30.0 30.0" ! \
'video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, \
 format=(string)I420, framerate=(fraction)30/1' ! nvtee ! \
nvvidconv flip-method=2 ! \
'video/x-raw(memory:NVMM), format=(string)I420' ! nvoverlaysink -e 
@wangtiancai commented Jun 23, 2019

I have solved the problem by using this pipeline in VideoCapture('nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)24/1 ! nvvidconv! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink'). Thank you for your help.
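For readability, that same pipeline written out as a complete cv2.VideoCapture() call might look like the sketch below (same elements and caps as in the comment above, only with spacing added around the '!' separators):

import cv2

# Sketch of the pipeline described above: nvcamerasrc at 1280x720, 24 fps,
# converted to BGR so OpenCV can consume the frames via appsink.
gst_str = ('nvcamerasrc ! '
           'video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, '
           'format=(string)I420, framerate=(fraction)24/1 ! '
           'nvvidconv ! video/x-raw, format=(string)BGRx ! '
           'videoconvert ! video/x-raw, format=(string)BGR ! appsink')
cap = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)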

@anmol8496 commented Jun 26, 2019

I write video using OpenCV, but the recording drops frames, so the saved video looks fast-forwarded and its duration is shorter than the original. If any solution is possible for this problem, please tell us.

-----------------thanks-------------

@jkjung-avt (author) commented Jun 27, 2019

@anmol8496 Your problem is highly dependent on how you process and save the frames. If you are using cv2.VideoWriter (as described in the StackOverflow post below), you could modify the fps value, say from 20.0 to 5.0, to cope with the problem.

https://stackoverflow.com/questions/30509573/writing-an-mp4-video-using-python-opencv/54731615#54731615
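A minimal sketch of that idea follows; the source, output file name, codec, frame size, and fps value here are only placeholders, the point is that the fps passed to VideoWriter should match the rate at which frames are actually captured and written:

import cv2

cap = cv2.VideoCapture(0)  # placeholder source
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# If the saved video plays back too fast, lower this fps value (e.g. from 20.0
# to 5.0) so it matches the real capture/write rate.
out = cv2.VideoWriter('output.mp4', fourcc, 5.0, (1280, 720))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Resize so the frame size matches what VideoWriter was opened with.
    out.write(cv2.resize(frame, (1280, 720)))

cap.release()
out.release()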

@anmol8496 commented Jun 27, 2019

I'm sharing my code with you. I capture video from an IP camera that streams at 25 fps and record at 25 fps, but the saved video has fewer frames: I record for 30 seconds, yet the output is shorter than 30 seconds because of dropped frames. By my count there should be about 25 * 30 = 750 frames in 30 seconds of stream, but the written video has only about 310 frames. Running it repeatedly, the frame count varies, but only by about 10-15 frames.

----------------------------------------------------------------------mycode.py-------------------------------------------------------
import numpy as np
import os
from datetime import datetime
from datetime import date
import time
import socket
import cv2  # needed for VideoCapture/VideoWriter below


def forrepeat():
    while True:
        # Check whether the camera's HTTP port is reachable
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex(('192.168.11.89', 8811))
        if result == 0:
            a = str(date.today()) + "_1_" + str(datetime.time(datetime.now())) + "." + "avi"
            print(a)
            FILE_OUTPUT = a
            if os.path.isfile(FILE_OUTPUT):
                os.remove(FILE_OUTPUT)

            cap = cv2.VideoCapture("http://admin:admin@192.168.11.89:8811/")
            fps = cap.get(cv2.CAP_PROP_FPS)
            fourcc = cv2.VideoWriter_fourcc(*'MPEG')
            out = cv2.VideoWriter(FILE_OUTPUT, fourcc, fps, (1280, 720))
            start = time.time()
            while cap.isOpened():
                t_end = start + 30  # record for 30 seconds
                if time.time() < t_end:
                    ret, frame = cap.read()
                    if ret == True:
                        cv2.imshow('titile_name', frame)
                        out.write(frame)
                    else:
                        break
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    break

            # When everything is done, release the capture and the writer
            cap.release()
            out.release()
            cv2.destroyAllWindows()


forrepeat()

@anmol8496 commented Jun 27, 2019

https://answers.opencv.org/question/94012/is-opencvs-videocaptureread-function-skipping-frames/
There is also a frame-skipping problem.
I think it is a library problem: read() takes some buffer time, and frames that arrive during that time are missed.

If there is any other way to write video from an IP camera, please share it with me.
---------------thanks------------

@anmol8496 commented Jun 28, 2019

@wangtiancai We don't have any resolution or frame-rate problem. One person commented on my issue at the link below: opencv/opencv#14911
But in my view, the read() and write() operations have a large buffering overhead, or they don't keep frames in FIFO order, so frames get dropped.
If I find another framework that gives good results, I will share it with you.
----------thanks
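One common mitigation (not from this thread, just a sketch) is to decouple capture from writing: grab frames in a background thread and queue them, so a slow write does not stall the reads. Whether it helps depends on how the capture backend buffers frames; the address, file name, and frame size below are taken from or modeled on the code above:

import threading
import queue
import time
import cv2

frames = queue.Queue(maxsize=100)

def grab(cap, stop):
    # Capture thread: read frames as fast as the source delivers them.
    while not stop.is_set():
        ret, frame = cap.read()
        if not ret:
            break
        frames.put(frame)

cap = cv2.VideoCapture('http://admin:admin@192.168.11.89:8811/')  # address from the code above
stop = threading.Event()
threading.Thread(target=grab, args=(cap, stop), daemon=True).start()

fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back to 25 if the source reports nothing
out = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc(*'MPEG'), fps, (1280, 720))

t_end = time.time() + 30  # record for 30 seconds, as in the code above
while time.time() < t_end:
    try:
        frame = frames.get(timeout=1)  # writer loop: drain the queue
    except queue.Empty:
        continue
    out.write(cv2.resize(frame, (1280, 720)))

stop.set()
cap.release()
out.release()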

@lab1104 commented Sep 5, 2019

Hi Anmol,
I am using a FLIR Grasshopper USB3 camera. When I try to run "python3 tegra-cam.py", I get the following error:

OpenCV Error: Unspecified error (GStreamer: unable to start pipeline
) in cvCaptureFromCAM_GStreamer, file /home/nvidia/build_opencv/opencv/modules/videoio/src/cap_gstreamer.cpp, line 887
VIDEOIO(cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename)): raised OpenCV exception:

/home/nvidia/build_opencv/opencv/modules/videoio/src/cap_gstreamer.cpp:887: error: (-2) GStreamer: unable to start pipeline
in function cvCaptureFromCAM_GStreamer

Traceback (most recent call last):
File "tegra-cam.py", line 159, in
main()
File "tegra-cam.py", line 146, in main
args.image_height)
File "tegra-cam.py", line 87, in open_cam_onboard
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
KeyboardInterrupt

Please help..

@anmol8496 commented Sep 6, 2019

@lab1104 You are using an external camera. I think you first need to install the camera driver on your system, and then try to run the code.

@guoxiaolu commented Jun 10, 2020

How do I read a local video file?
This is the warning message when calling VideoCapture(filepath) directly:
Opening in BLOCKING MODE
NvMMLiteOpen : Block : BlockType = 260
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 260
[ WARN:0] global /home/nvidia/Downloads/opencv-4.2.0.zip/opencv-4.2.0/modules/videoio/src/cap_gstreamer.cpp (1759) handleMessage OpenCV | GStreamer warning: Embedded video playback halted; module avidemux0 reported: Internal data stream error.
[ WARN:0] global /home/nvidia/Downloads/opencv-4.2.0.zip/opencv-4.2.0/modules/videoio/src/cap_gstreamer.cpp (888) open OpenCV | GStreamer warning: unable to start pipeline
[ WARN:0] global /home/nvidia/Downloads/opencv-4.2.0.zip/opencv-4.2.0/modules/videoio/src/cap_gstreamer.cpp (480) isPipelinePlaying OpenCV | GStreamer warning: GStreamer: pipeline have not been created

@zhe-li933 commented Jun 19, 2020

(quoting @guoxiaolu's question above about reading a local video file)

Have you solved your problem? If you have, can you share your approach with me? I'd appreciate it!

@guoxiaolu commented Jun 22, 2020

(replying to @zhe-li933's question above)

See this link: https://forums.developer.nvidia.com/t/reading-mp4-file-via-gstreamer-in-opencv/81219/18
import cv2
print(cv2.__version__)

filepath = "/home/nvidia/Desktop/opencv/opencv/samples/data/Megamind.avi"

cap = cv2.VideoCapture('filesrc location={} ! mpeg4videoparse ! omxmpeg4videodec ! nvvidconv ! video/x-raw,format=BGRx ! queue ! videoconvert ! queue ! video/x-raw, format=BGR ! appsink'.format(filepath), cv2.CAP_GSTREAMER)

if not cap.isOpened():
    print("Failed to open capture")
    exit()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Test', frame)
    cv2.waitKey(1)

@wurongyuan commented Jul 9, 2020

Hi, I run the code to start my onboard camera and it fails. However, when I use the command

gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=640, height=480, framerate=30/1, format=NV12' ! nvvidconv flip-method=2 ! nvegltransform ! nveglglessink -e

the camera opens fine.
I also tried changing the code as below, but that failed too:

gst_str = ('nvarguscamerasrc ! '
           'video/x-raw(memory:NVMM), '
           'width=(int)1920, height=(int)1080, '
           'format=(string)NV12, framerate=(fraction)30/1 ! '
           'nvvidconv flip-method=2 ! '
           'video/x-raw, width=(int){}, height=(int){}, '
           'format=(string)BGRx ! '
           'videoconvert ! appsink').format(width, height)

What should I do to open the onboard camera from opencv-python?

@jkjung-avt (author) commented Jul 10, 2020

Try converting your gst-launch-1.0 command:

gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=640, height=480, framerate=30/1, format=NV12' ! nvvidconv flip-method=2 ! nvegltransform ! nveglglessink -e

to a gstreamer pipeline string for cv2.VideoCapture() like this:

gst_str = ('nvarguscamerasrc ! '
           'video/x-raw(memory:NVMM), width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)NV12 ! '
           'nvvidconv flip-method=2 ! '
           'nvegltransform ! '
           'videoconvert ! '
           'appsink').format(width, height)

You could also try to remove either "videoconvert" or "nvegltransform" from the pipeline string above, to see if it still works and might run faster.
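For instance, one variant of that simplification, dropping nvegltransform and letting nvvidconv output BGRx (the same pattern used in open_cam_onboard() above), would be a sketch like this, with your 640x480 caps:

import cv2

# Sketch only: nvarguscamerasrc -> NVMM NV12 -> nvvidconv (flip) -> BGRx -> BGR for OpenCV.
gst_str = ('nvarguscamerasrc ! '
           'video/x-raw(memory:NVMM), width=(int)640, height=(int)480, '
           'framerate=(fraction)30/1, format=(string)NV12 ! '
           'nvvidconv flip-method=2 ! '
           'video/x-raw, format=(string)BGRx ! '
           'videoconvert ! appsink')
cap = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)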

@bhavitvyamalik commented Jul 25, 2020

Hi,
Is it possible to stream the onboard camera output of a Jetson Nano to 2 virtual camera sinks? I tried doing that with:
gst-launch-1.0 -v nvarguscamerasrc ! 'video/x-raw(memory:NVMM), format=NV12, width=1920, height=1080, framerate=30/1' ! nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! videoconvert ! identity drop-allocation=1 ! 'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! v4l2sink device=/dev/video3 v4l2sink device=/dev/video4

but it isn't working. Essentially, I want to mirror my onboard camera stream to the device's web browser, for which I created 2 virtual camera devices, /dev/video3 and /dev/video4, but the pipeline only streams to a single virtual camera device (the command works in the case of a single sink).
