-
-
Save lweingart/870a66aced8f65975a74a39e886c78d2 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
################################################################################ | |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. | |
# | |
# Permission is hereby granted, free of charge, to any person obtaining a | |
# copy of this software and associated documentation files (the "Software"), | |
# to deal in the Software without restriction, including without limitation | |
# the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
# and/or sell copies of the Software, and to permit persons to whom the | |
# Software is furnished to do so, subject to the following conditions: | |
# | |
# The above copyright notice and this permission notice shall be included in | |
# all copies or substantial portions of the Software. | |
# | |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
# DEALINGS IN THE SOFTWARE. | |
################################################################################ | |
import sys | |
sys.path.append('../') | |
import gi | |
import configparser | |
gi.require_version('Gst', '1.0') | |
gi.require_version('GstRtspServer', '1.0') | |
from gi.repository import GObject, Gst, GstRtspServer | |
from gi.repository import GLib | |
from ctypes import * | |
import time | |
import sys | |
import math | |
import platform | |
from common.is_aarch_64 import is_aarch64 | |
from common.bus_call import bus_call | |
from common.FPS import GETFPS | |
import pyds | |
import subprocess | |
# Per-stream FPS counters keyed "stream<N>"; populated in main(), read by the
# pgie src-pad buffer probe.
fps_streams = {}
MAX_DISPLAY_LEN = 64
# PeopleNet class id for "person" (the only class this app counts).
PGIE_CLASS_ID_PERSON = 0
# NOTE(review): the four MUXER_* constants below are not referenced by main(),
# which sets streammux width/height/timeout with literal values — confirm
# whether they should be wired in.
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
# Output resolution of the nvmultistreamtiler element.
TILED_OUTPUT_WIDTH = 1024
TILED_OUTPUT_HEIGHT = 768
# Caps feature string marking NVIDIA device memory buffers.
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
# nvdsosd settings: process-mode 0 and on-screen text disabled.
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 0
pgie_classes_str = ["Person"]
def set_network_card():
    """Configure the board's network interface via sudo.

    Assigns a static address to eth0 and installs a default gateway,
    logging each command and its exit status. Best-effort: failures are
    printed but not raised.
    """
    commands = (
        ['sudo', 'ifconfig', 'eth0', '192.168.2.2', 'netmask', '255.255.255.0', 'up'],
        ['sudo', 'route', 'add', 'default', 'gw', '192.168.2.1'],
    )
    print('Prepare network card...')
    for command in commands:
        print(f'Running {" ".join(command)}')
        return_code = subprocess.call(command)
        print(f'Result is {return_code}')
# tiler_src_pad_buffer_probe extracts metadata from each buffer flowing out of
# the primary inference element, counts detected persons per frame, and feeds
# the per-stream FPS counters.
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Buffer probe: count detections per frame and update FPS stats.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the Gst.Buffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer always continues downstream.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # Fix: the original returned None here; a pad probe must return a
        # Gst.PadProbeReturn value, and OK keeps the pipeline flowing.
        return Gst.PadProbeReturn.OK
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # pyds.NvDsFrameMeta.cast() keeps ownership of the underlying
            # memory in the C code, so the Python GC leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        # Seed the person class so the summary print never KeyErrors even
        # when a frame has no detections.
        obj_counter = {
            PGIE_CLASS_ID_PERSON: 0
        }
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Fix: the original did `obj_counter[class_id] += 1`, which raises
            # KeyError for any class id other than person; .get() tolerates
            # unexpected class ids (the pgie config is expected to filter
            # them, but the probe should not crash if it doesn't).
            obj_counter[obj_meta.class_id] = obj_counter.get(obj_meta.class_id, 0) + 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        print(f"Frame Number={frame_number}, Number of Objects={num_rects}, Person_count={obj_counter[PGIE_CLASS_ID_PERSON]}")
        # Tick the FPS counter for the stream this frame came from.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def cb_newpad(decodebin, decoder_src_pad, data):
    """'pad-added' handler: link a new decoder video pad to the source bin.

    `data` is the source GstBin whose ghost pad "src" must be pointed at the
    freshly created decoder src pad. Audio pads and non-NVMM (software
    decoder) pads are rejected.
    """
    print("In cb_newpad\n")
    source_bin = data
    caps = decoder_src_pad.get_current_caps()
    gstname = caps.get_structure(0).get_name()
    features = caps.get_features(0)
    print("gstname=", gstname)
    # Only video pads are of interest; ignore audio (and anything else).
    if gstname.find("video") == -1:
        return
    print("features=", features)
    # NVMM memory in the caps means decodebin picked an NVIDIA hardware
    # decoder (nvdec_*); that is required for the rest of the pipeline.
    if not features.contains("memory:NVMM"):
        sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")
        return
    # Retarget the source bin's ghost pad at the real decoder src pad.
    bin_ghost_pad = source_bin.get_static_pad("src")
    if not bin_ghost_pad.set_target(decoder_src_pad):
        sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
def decodebin_child_added(child_proxy, Object, name, user_data):
    """'child-added' handler: recurse into nested decodebins.

    When uridecodebin internally creates another decodebin, subscribe to its
    'child-added' signal too, so decoder selection can be observed at every
    level of nesting.
    """
    print(f"Decodebin child added: {name}\n")
    if "decodebin" in name:
        Object.connect("child-added", decodebin_child_added, user_data)
def create_source_bin(index, uri):
    """Wrap a uridecodebin for one input URI inside its own GstBin.

    The bin exposes a single ghost pad "src" that has no target at creation
    time; cb_newpad retargets it at the decoder's src pad once decodebin has
    plugged the right demuxer/decoder for the stream.

    Returns the new bin, or None if the ghost pad could not be added.
    """
    print("Creating source bin")
    # Unique per-source bin name, e.g. "source-bin-00".
    bin_name = f"source-bin-{index:02d}"
    print(bin_name)
    source_bin = Gst.Bin.new(bin_name)
    if not source_bin:
        sys.stderr.write(" Unable to create source bin \n")
    # uridecodebin inspects the URI, detects container format and codec, and
    # plugs the appropriate demux and decode plugins itself.
    decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    decode_bin.set_property("uri", uri)
    # "pad-added" fires once a raw-data pad has been created by the
    # decodebin; "child-added" lets us watch nested decodebins too.
    decode_bin.connect("pad-added", cb_newpad, source_bin)
    decode_bin.connect("child-added", decodebin_child_added, source_bin)
    Gst.Bin.add(source_bin, decode_bin)
    # Targetless ghost pad acting as a proxy for the decoder src pad; the
    # target is set later in the cb_newpad callback.
    ghost_pad = nbin_pad = Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)
    if not source_bin.add_pad(ghost_pad):
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return source_bin
def main(args):
    """Build and run the DeepStream pipeline.

    Pipeline: N source bins -> nvstreammux -> nvinfer (PeopleNet) ->
    nvtracker -> nvmultistreamtiler -> nvvideoconvert -> nvdsosd ->
    nvvideoconvert -> capsfilter(I420) -> H264 encoder -> rtph264pay ->
    udpsink, re-served over RTSP at rtsp://<host>:8554/ds-test.

    args: sys.argv-style list; args[1:] are the input URIs.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    set_network_card()
    # One FPS counter per input stream, consumed by the buffer probe.
    for i in range(0, len(args)-1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args)-1
    # Standard GStreamer initialization
    # NOTE(review): GObject.threads_init() has been a deprecated no-op since
    # PyGObject 3.11 — harmless, kept for parity with the NVIDIA samples.
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # One source bin per URI, each linked to a requested streammux sink pad.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i+1]
        # RTSP inputs mark the pipeline as live (affects streammux below).
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write(" Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write(" Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # Queues decouple the main processing elements from each other.
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    # Primary inference engine (PeopleNet, configured below).
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    # Tiler composites all batched streams into one 2D grid.
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # OSD text drawing disabled (OSD_DISPLAY_TEXT = 0).
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
    if is_aarch64():
        print("Creating transform \n ")
        # NOTE(review): `transform` is created but never added to the
        # pipeline or linked below — likely leftover from a display-sink
        # variant of this sample; confirm and remove.
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter forcing I420 in NVMM memory ahead of the encoder.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # Make the encoder (codec is hard-coded to H264 here).
    bitrate = 4000000
    codec = 'H264'
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        # Jetson-specific encoder tuning; bufapi-version selects the
        # DeepStream buffer API.
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay \n")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay \n")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")
    if is_live:
        print("At least one of the sources is live \n")
        streammux.set_property('live-source', 1)
    # Make the UDP sink; the RTSP factory below reads this port back.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    # 224.224.255.255 is in the multicast range — same address the NVIDIA
    # rtsp-out sample uses for the local RTP hop.
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 0)
    streammux.set_property('gpu_id', 0)
    streammux.set_property('enable-padding', 0)
    streammux.set_property('nvbuf-memory-type', 0)
    # NOTE(review): literal values here instead of the MUXER_* / TILED_*
    # module constants — confirm whether the constants should be used.
    streammux.set_property('width', 1024)
    streammux.set_property('height', 768)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 40000)
    pgie.set_property('config-file-path', 'config/pgie_config.txt')
    pgie.set_property('batch-size', 1)
    # Set properties of tracker from the [tracker] section of the INI file.
    config = configparser.ConfigParser()
    config.read('config/tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)
    # Near-square tile grid large enough for all sources.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    # Link with a queue between each pair of processing elements.
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(tiler)
    tiler.link(queue4)
    queue4.link(nvvidconv)
    nvvidconv.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Despite the variable name, the probe is attached to the pgie src pad
    # so it sees per-frame metadata right after inference.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)
    # List the sources (skip args[0], the program name).
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)
    # Start streaming: an RTSP server re-serves the udpsink's RTP stream.
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = f"{rtsp_port_num}"
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    # The factory's udpsrc listens on the same port the udpsink writes to.
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (
            updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except kept deliberately: swallow KeyboardInterrupt et al. so
        # the pipeline is always torn down cleanly below.
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
# Script entry point: forward the full argv (program name + input URIs) and
# propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
################################################################################ | |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. | |
# | |
# Permission is hereby granted, free of charge, to any person obtaining a | |
# copy of this software and associated documentation files (the "Software"), | |
# to deal in the Software without restriction, including without limitation | |
# the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
# and/or sell copies of the Software, and to permit persons to whom the | |
# Software is furnished to do so, subject to the following conditions: | |
# | |
# The above copyright notice and this permission notice shall be included in | |
# all copies or substantial portions of the Software. | |
# | |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
# DEALINGS IN THE SOFTWARE. | |
################################################################################ | |
# Following properties are mandatory when engine files are not specified: | |
# int8-calib-file(Only in INT8) | |
# Caffemodel mandatory properties: model-file, proto-file, output-blob-names | |
# UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names | |
# ONNX: onnx-file | |
# | |
# Mandatory properties for detectors: | |
# num-detected-classes | |
# | |
# Optional properties for detectors: | |
# cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) | |
# custom-lib-path | |
# parse-bbox-func-name | |
# | |
# Mandatory properties for classifiers: | |
# classifier-threshold, is-classifier | |
# | |
# Optional properties for classifiers: | |
# classifier-async-mode(Secondary mode only, Default=false) | |
# | |
# Optional properties in secondary mode: | |
# operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), | |
# input-object-min-width, input-object-min-height, input-object-max-width, | |
# input-object-max-height | |
# | |
# Following properties are always recommended: | |
# batch-size(Default=1) | |
# | |
# Other optional properties: | |
# net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), | |
# model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, | |
# mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), | |
# custom-lib-path, network-mode(Default=0 i.e FP32) | |
# | |
# The values in the config file are overridden by values set through GObject | |
# properties. | |
[property] | |
gpu-id=0 | |
net-scale-factor=0.0039215697906911373 | |
tlt-model-key=tlt_encode | |
tlt-encoded-model=peoplenet/resnet34_peoplenet_pruned.etlt | |
model-engine-file=peoplenet/resnet34_peoplenet_pruned.etlt_b1_gpu0_int8.engine | |
labelfile-path=peoplenet/labels.txt | |
int8-calib-file=/opt/nvidia/deepstream/deepstream-5.1/samples/models/Primary_Detector/cal_trt.bin | |
force-implicit-batch-dim=1 | |
batch-size=1 | |
process-mode=1 | |
model-color-format=0 | |
network-mode=1 | |
num-detected-classes=3 | |
filter-out-class-ids=1;2 | |
interval=0 | |
gie-unique-id=1 | |
output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid | |
[class-attrs-all] | |
pre-cluster-threshold=0.4 | |
## Set eps=0.7 and minBoxes for cluster-mode=1(DBSCAN) | |
eps=0.7 | |
minBoxes=1 | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
################################################################################ | |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. | |
# | |
# Permission is hereby granted, free of charge, to any person obtaining a | |
# copy of this software and associated documentation files (the "Software"), | |
# to deal in the Software without restriction, including without limitation | |
# the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
# and/or sell copies of the Software, and to permit persons to whom the | |
# Software is furnished to do so, subject to the following conditions: | |
# | |
# The above copyright notice and this permission notice shall be included in | |
# all copies or substantial portions of the Software. | |
# | |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
# DEALINGS IN THE SOFTWARE. | |
################################################################################ | |
[tracker] | |
enable=1 | |
tracker-width=640 | |
tracker-height=288 | |
ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so | |
#ll-config-file required for DCF/IOU only | |
ll-config-file=tracker_config.yml | |
#ll-config-file=iou_config.txt | |
gpu-id=0 | |
#enable-batch-process applicable to DCF only | |
enable-batch-process=0 |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment