Skip to content

Instantly share code, notes, and snippets.

@marcelherd
Created October 24, 2019 10:03
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save marcelherd/67be9734ec018626b7d7b2d9c42b86a8 to your computer and use it in GitHub Desktop.
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include <iostream> // std::cerr for error reporting (was only available transitively)
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "cv-helpers.hpp" // Helper functions for conversions between RealSense and OpenCV
int main(int argc, char* argv[]) try
{
using namespace cv;
using namespace rs2;
// Define colorizer and align processing-blocks
colorizer colorize;
align align_to(RS2_STREAM_COLOR);
// Start the camera
pipeline pipe;
//pipe.start();
// Populate pipeline with pre-recorded data
config cfg;
cfg.enable_device_from_file("D:\\outdoors.bag");
pipe.start(cfg);
const auto window_name = "Display Image";
namedWindow(window_name, WINDOW_AUTOSIZE);
// Create VideoWriter to write output to UDPsink on port 8553
VideoWriter out("appsrc ! videoconvert ! x264enc tune=zerolatency ! rtph264pay pt=96 ! udpsink host=127.0.0.1 port=8553", CAP_GSTREAMER, 0, 30, Size(640, 480), true);
// We are using StructuringElement for erode / dilate operations
auto gen_element = [](int erosion_size)
{
return getStructuringElement(MORPH_RECT,
Size(erosion_size + 1, erosion_size + 1),
Point(erosion_size, erosion_size));
};
const int erosion_size = 3;
auto erode_less = gen_element(erosion_size);
auto erode_more = gen_element(erosion_size * 2);
// The following operation is taking grayscale image,
// performs threashold on it, closes small holes and erodes the white area
auto create_mask_from_depth = [&](Mat& depth, int thresh, ThresholdTypes type)
{
threshold(depth, depth, thresh, 255, type);
dilate(depth, depth, erode_less);
erode(depth, depth, erode_more);
};
// Skips some frames to allow for auto-exposure stabilization
for (int i = 0; i < 10; i++) pipe.wait_for_frames();
while (waitKey(1) < 0 && getWindowProperty(window_name, WND_PROP_AUTOSIZE) >= 0)
{
frameset data = pipe.wait_for_frames();
// Make sure the frameset is spatialy aligned
// (each pixel in depth image corresponds to the same pixel in the color image)
frameset aligned_set = align_to.process(data);
frame depth = aligned_set.get_depth_frame();
auto color_mat = frame_to_mat(aligned_set.get_color_frame());
// Colorize depth image with white being near and black being far
// This will take advantage of histogram equalization done by the colorizer
colorize.set_option(RS2_OPTION_COLOR_SCHEME, 2);
frame bw_depth = depth.apply_filter(colorize);
// Generate "near" mask image:
auto near = frame_to_mat(bw_depth);
cvtColor(near, near, COLOR_BGR2GRAY);
// Take just values within range [180-255]
// These will roughly correspond to near objects due to histogram equalization
create_mask_from_depth(near, 180, THRESH_BINARY);
// Generate "far" mask image:
auto far = frame_to_mat(bw_depth);
cvtColor(far, far, COLOR_BGR2GRAY);
far.setTo(255, far == 0); // Note: 0 value does not indicate pixel near the camera, and requires special attention
create_mask_from_depth(far, 100, THRESH_BINARY_INV);
// GrabCut algorithm needs a mask with every pixel marked as either:
// BGD, FGB, PR_BGD, PR_FGB
Mat mask;
mask.create(near.size(), CV_8UC1);
mask.setTo(Scalar::all(GC_BGD)); // Set "background" as default guess
mask.setTo(GC_PR_BGD, far == 0); // Relax this to "probably background" for pixels outside "far" region
mask.setTo(GC_FGD, near == 255); // Set pixels within the "near" region to "foreground"
// Run Grab-Cut algorithm:
Mat bgModel, fgModel;
grabCut(color_mat, mask, Rect(), bgModel, fgModel, 1, GC_INIT_WITH_MASK);
// Extract foreground pixels based on refined mask from the algorithm
Mat3b foreground = Mat3b::zeros(color_mat.rows, color_mat.cols);
color_mat.copyTo(foreground, (mask == GC_FGD) | (mask == GC_PR_FGD));
imshow(window_name, foreground);
// Write to UDP sink
out.write(foreground);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
/* GStreamer
* Copyright (C) 2008 Wim Taymans <wim.taymans at gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include <gst/gst.h>
#include <gst/rtsp-server/rtsp-server.h>
/* TCP port the RTSP server listens on when no -p/--port option is given. */
#define DEFAULT_RTSP_PORT "8554"
/* Filled in by GOption parsing below; cast drops const for the GOption API. */
static char* port = (char*)DEFAULT_RTSP_PORT;
/* Command-line options understood by this program: -p/--port PORT. */
static GOptionEntry entries[] = {
{"port", 'p', 0, G_OPTION_ARG_STRING, &port,
"Port to listen on (default: " DEFAULT_RTSP_PORT ")", "PORT"},
{NULL}
};
/* RTSP relay server: receives the RTP/H.264 stream that the OpenCV program
 * sends to UDP port 8553, re-encodes it, and serves it to RTSP clients at
 * rtsp://<host>:<port>/realsense (port configurable via -p/--port).
 * Returns 0 on clean shutdown, -1 on option-parsing or attach failure. */
int
main(int argc, char* argv[])
{
  GMainLoop* loop;
  GstRTSPServer* server;
  GstRTSPMountPoints* mounts;
  GstRTSPMediaFactory* factory;
  GOptionContext* optctx;
  GError* error = NULL;

  /* Parse -p/--port plus the standard GStreamer options (gst_init included). */
  optctx = g_option_context_new("<launch line> - Test RTSP Server, Launch\n\n"
      "Example: \"( videotestsrc ! x264enc ! rtph264pay name=pay0 pt=96 )\"");
  g_option_context_add_main_entries(optctx, entries, NULL);
  g_option_context_add_group(optctx, gst_init_get_option_group());
  if (!g_option_context_parse(optctx, &argc, &argv, &error)) {
    g_printerr("Error parsing options: %s\n", error->message);
    g_option_context_free(optctx);
    g_clear_error(&error);
    return -1;
  }
  g_option_context_free(optctx);

  loop = g_main_loop_new(NULL, FALSE);

  /* create a server instance listening on the configured service port */
  server = gst_rtsp_server_new();
  g_object_set(server, "service", port, NULL);

  /* get the mount points for this server, every server has a default object
   * that be used to map uri mount points to media factories */
  mounts = gst_rtsp_server_get_mount_points(server);

  /* Media factory built from gst-launch syntax; any launch line works as
   * long as it contains payloaders named pay%d, one per stream. This one
   * consumes the RTP stream arriving on UDP port 8553. */
  factory = gst_rtsp_media_factory_new();
  gst_rtsp_media_factory_set_launch(factory, "udpsrc port=8553 ! application/x-rtp, payload=96 ! rtpjitterbuffer ! rtph264depay ! avdec_h264 ! x264enc tune=zerolatency ! rtph264pay name=pay0 pt=96");
  /* share one media pipeline between all connected clients */
  gst_rtsp_media_factory_set_shared(factory, TRUE);

  /* attach the factory to the /realsense url */
  gst_rtsp_mount_points_add_factory(mounts, "/realsense", factory);

  /* don't need the ref to the mapper anymore */
  g_object_unref(mounts);

  /* Attach the server to the default maincontext. The call returns 0 on
   * failure (e.g. the port is already in use); previously this was ignored
   * and the program would announce "stream ready" while serving nothing. */
  if (gst_rtsp_server_attach(server, NULL) == 0) {
    g_printerr("Failed to attach RTSP server (is port %s already in use?)\n", port);
    g_object_unref(server);
    g_main_loop_unref(loop);
    return -1;
  }

  /* start serving */
  g_print("stream ready at rtsp://127.0.0.1:%s/realsense\n", port);
  g_main_loop_run(loop);

  /* only reached if something quits the main loop; release our refs */
  g_main_loop_unref(loop);
  g_object_unref(server);
  return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment