
@MRo47
Last active July 23, 2023 08:12
Retrieving aligned depth and color images from a RealSense 450i for OpenCV
#include <opencv2/opencv.hpp>
#include <iostream>
#include <librealsense2/rs.hpp>
#include <cstring>
#include <cstdint>
#include <algorithm>
/* Adapted from https://github.com/IntelRealSense/librealsense/issues/2552#issuecomment-431075931
   (RealSense Customer Engineering Team comment on fu3lab's issue) */
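/* Build sketch (assumes librealsense2 and OpenCV 4 are installed and visible to pkg-config;
   adjust the source file name and package names for your setup):
     g++ -std=c++11 align_depth_opencv.cpp -o align_depth_opencv \
         $(pkg-config --cflags --libs realsense2 opencv4)
*/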
float get_depth_scale(rs2::device dev)
{
    // Go over the device's sensors
    for (rs2::sensor& sensor : dev.query_sensors())
    {
        // Check if the sensor is a depth sensor
        if (rs2::depth_sensor dpt = rs2::depth_sensor(sensor))
        {
            return dpt.get_depth_scale();
        }
    }
    throw std::runtime_error("Device does not have a depth sensor");
}
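// Example: D400-series devices typically report a depth scale of 0.001 (i.e. millimetre units),
// so a raw Z16 value of 1500 corresponds to 1.5 m.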
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
    const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
    uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));

    int width = other_frame.get_width();
    int height = other_frame.get_height();
    int other_bpp = other_frame.get_bytes_per_pixel();

    //#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
    for (int y = 0; y < height; y++)
    {
        auto depth_pixel_index = y * width;
        for (int x = 0; x < width; x++, ++depth_pixel_index)
        {
            // Get the depth value of the current pixel
            auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];

            // Check if the depth value is invalid (<= 0) or greater than the threshold
            if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
            {
                // Calculate the offset in the other frame's buffer to the current pixel
                auto offset = depth_pixel_index * other_bpp;

                // Set the pixel to a near-black "background" color (0x01 in every channel)
                std::memset(&p_other_frame[offset], 0x01, other_bpp);
            }
        }
    }
}
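// Note: an equivalent OpenCV-style sketch (not used here) would wrap the depth buffer in a
// CV_16UC1 cv::Mat, build a mask of out-of-range pixels with cv::inRange, and paint them via
// cv::Mat::setTo(); the explicit loop above mirrors the original librealsense example instead.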
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
    //Given a vector of streams, we try to find a depth stream and another stream to align depth with.
    //We prioritize color streams to make the view look better.
    //If color is not available, we take any other stream (other than depth).
    rs2_stream align_to = RS2_STREAM_ANY;
    bool depth_stream_found = false;
    bool color_stream_found = false;
    for (rs2::stream_profile sp : streams)
    {
        rs2_stream profile_stream = sp.stream_type();
        if (profile_stream != RS2_STREAM_DEPTH)
        {
            if (!color_stream_found) //Prefer color
                align_to = profile_stream;

            if (profile_stream == RS2_STREAM_COLOR)
            {
                color_stream_found = true;
            }
        }
        else
        {
            depth_stream_found = true;
        }
    }

    if (!depth_stream_found)
        throw std::runtime_error("No Depth stream available");
    if (align_to == RS2_STREAM_ANY)
        throw std::runtime_error("No stream found to align with Depth");

    return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
    for (auto&& sp : prev)
    {
        //If a previous profile is in current (maybe just added another)
        auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
        if (itr == std::end(current)) //If the previous stream wasn't found in current
        {
            return true;
        }
    }
    return false;
}
int main(int argc, char * argv[]) try
{
    // Create and initialize GUI related objects
    // window app(1280, 720, "CPP - Align Example"); // Simple window handling / ImGui library initialization
    rs2::colorizer c; // Helper to colorize depth images
    // texture renderer; // Helper for rendering images

    // Create a pipeline to easily configure and start the camera
    rs2::pipeline pipe;
    // The streams are configured explicitly here instead of relying on the defaults that
    // pipeline::start() would otherwise pick; start() returns the pipeline profile it used
    // to start the device.
    rs2::config cfg;
    cfg.enable_stream(rs2_stream::RS2_STREAM_DEPTH, 1280, 720, rs2_format::RS2_FORMAT_Z16);
    // Request the color stream as BGR8 so the buffer matches OpenCV's default channel order
    cfg.enable_stream(rs2_stream::RS2_STREAM_COLOR, 1280, 720, rs2_format::RS2_FORMAT_BGR8);
    rs2::pipeline_profile profile = pipe.start(cfg);

    // Each depth camera might have different units for depth pixels, so we get the scale here
    // Using the pipeline's profile, we can retrieve the device that the pipeline uses
    float depth_scale = get_depth_scale(profile.get_device());

    //Pipeline could choose a device that does not have a color stream
    //If there is no color stream, choose to align depth to another stream
    rs2_stream align_to = find_stream_to_align(profile.get_streams());

    // Create a rs2::align object.
    // rs2::align allows us to perform alignment of depth frames to other frames
    //The "align_to" is the stream type to which we plan to align depth frames.
    rs2::align align(align_to);

    // Define a variable for controlling the distance to clip
    float depth_clipping_distance = 10000.f;
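    // Note: pixels_distance in remove_background() is in meters (raw depth * depth_scale),
    // so 10000.f effectively disables clipping; lower it (e.g. to 1.f) to strip everything beyond ~1 m.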
    while (true) // Application still alive?
    {
        // Using the align object, we block the application until a frameset is available
        rs2::frameset frameset = pipe.wait_for_frames();

        // rs2::pipeline::wait_for_frames() can replace the device it uses in case of device error or disconnection.
        // Since rs2::align is aligning depth to some other stream, we need to make sure that the stream was not changed
        // after the call to wait_for_frames()
        if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
        {
            //If the profile was changed, update the align object and get the new device's depth scale
            profile = pipe.get_active_profile();
            align_to = find_stream_to_align(profile.get_streams());
            align = rs2::align(align_to);
            depth_scale = get_depth_scale(profile.get_device());
        }

        //Get the processed, aligned frameset
        auto processed = align.process(frameset);

        // Try to get both the other (color) frame and the aligned depth frame
        rs2::video_frame other_frame = processed.first(align_to);
        rs2::depth_frame aligned_depth_frame = processed.get_depth_frame();

        //If one of them is unavailable, continue to the next iteration
        if (!aligned_depth_frame || !other_frame)
        {
            continue;
        }

        // Pass both frames to remove_background so it will "strip" the background
        // NOTE: in this example, we alter the buffer of the other frame in place instead of copying it and altering the copy
        // This is not recommended in real applications since the other frame could be used elsewhere
        remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);
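        // After alignment, depth and color pixels share the same image coordinates, so a distance
        // can be read directly at any color pixel, e.g. at the image centre (in meters):
        // float centre_dist = aligned_depth_frame.get_distance(other_frame.get_width() / 2,
        //                                                      other_frame.get_height() / 2);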
        const int w_other = other_frame.get_width();
        const int h_other = other_frame.get_height();

        // Colorize the aligned depth frame for display
        rs2::video_frame depth_color = c.process(aligned_depth_frame);
        const int w_depth = depth_color.get_width();
        const int h_depth = depth_color.get_height();

        std::cout << "other frame\n";
        std::cout << "w = " << w_other << " h = " << h_other << '\n';
        std::cout << "stride = " << other_frame.get_stride_in_bytes() << '\n';
        std::cout << "bits per pixel = " << other_frame.get_bits_per_pixel() << '\n';
        std::cout << "depth frame\n";
        std::cout << "w = " << w_depth << " h = " << h_depth << '\n';
        std::cout << "stride = " << depth_color.get_stride_in_bytes() << '\n';
        std::cout << "bits per pixel = " << depth_color.get_bits_per_pixel() << '\n';

        // Wrap the frame buffers in cv::Mat headers for OpenCV
        cv::Mat image_frame(cv::Size(w_other, h_other), CV_8UC3, (void*)other_frame.get_data(), cv::Mat::AUTO_STEP);
        cv::Mat image_depth(cv::Size(w_depth, h_depth), CV_8UC3, (void*)depth_color.get_data(), cv::Mat::AUTO_STEP);
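        // These Mats are zero-copy views into librealsense-owned memory; they stay valid only while
        // other_frame / depth_color are alive, so call .clone() if an image must outlive its frame.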
cv::namedWindow( "Image window", cv::WINDOW_AUTOSIZE ); // Create a window for display.
cv::imshow( "Image window", image_frame ); // Show our image inside it.
cv::namedWindow( "Depth window", cv::WINDOW_AUTOSIZE ); // Create a window for display.
cv::imshow( "Depth window", image_depth ); // Show our image inside it.
if((char)cv::waitKey(25)==27)
break;
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}