Azure Kinect basic capturing and visualization on Windows
/*
    The most basic example to capture matching RGB and depth images from an Azure Kinect DK device.
    CImg is used to visualize the matching RGB and depth frames.
*/

// Includes used directly by this file
#include <cstdio>   // printf
#include <cstdlib>  // malloc, free

// Windows system libraries required by CImg's display code
#pragma comment(lib, "gdi32.lib")
#pragma comment(lib, "user32.lib")
#pragma comment(lib, "shell32.lib")

// Azure Kinect Sensor SDK
#include <k4a/k4a.h>
#pragma comment(lib, "k4a.lib")

// CImg (header-only) for visualization
#include "CImg.h"
using namespace cimg_library;
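// Build sketch, assuming MSVC on Windows (paths below are placeholders, not the
// original author's setup): from a Developer Command Prompt, a command along the lines of
//   cl /EHsc main.cpp /I"<Azure Kinect SDK include dir>" /I"<CImg dir>" /link /LIBPATH:"<Azure Kinect SDK lib dir>"
// should build this file; the #pragma comment(lib, ...) directives above name the
// libraries to link, so only the library search path needs to be supplied.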
/// Capture one matching (RGB, depth) pair from the sensor
int capture_kinect(k4a_device_t &device, k4a_transformation_t& transform, CImg<uint8_t>& imgRGB, CImg<uint16_t>& imgDEPTH)
{
    k4a_capture_t capture = NULL;
    const int32_t TIMEOUT_IN_MS = 1000;
    switch (k4a_device_get_capture(device, &capture, TIMEOUT_IN_MS))
    {
    case K4A_WAIT_RESULT_SUCCEEDED:
        break;
    case K4A_WAIT_RESULT_TIMEOUT:
        printf("Timed out waiting for a capture\n");
        return 1;
    case K4A_WAIT_RESULT_FAILED:
        printf("Failed to read a capture\n");
        return 1;
    }

    int width_pixels = 0, height_pixels = 0, stride_bytes = 0, index = 0;

    // Probe for a color image
    k4a_image_t imageBGRA32 = k4a_capture_get_color_image(capture);
    if (imageBGRA32)
    {
        width_pixels = k4a_image_get_width_pixels(imageBGRA32);
        height_pixels = k4a_image_get_height_pixels(imageBGRA32);
        stride_bytes = k4a_image_get_stride_bytes(imageBGRA32);
        auto buffer = k4a_image_get_buffer(imageBGRA32);

        // CImg stores planar data (all R, then all G, then all B),
        // while the device delivers interleaved BGRA rows.
        imgRGB = CImg<uint8_t>(width_pixels, height_pixels, 1, 3);
        int plane_size = width_pixels * height_pixels; // pixels per color plane
        for (int py = 0; py < height_pixels; py++) {
            for (int px = 0; px < width_pixels; px++) {
                index = (py * stride_bytes) + (4 * px);
                imgRGB._data[px + py * width_pixels]                  = buffer[index + 2]; // R
                imgRGB._data[px + py * width_pixels + plane_size]     = buffer[index + 1]; // G
                imgRGB._data[px + py * width_pixels + 2 * plane_size] = buffer[index + 0]; // B
            }
        }

        // Release the color image
        k4a_image_release(imageBGRA32);
    }
    else
    {
        printf("No RGB.\n");
        k4a_capture_release(capture);
        return 1;
    }

    // Access the depth16 image
    k4a_image_t imageDEPTH = k4a_capture_get_depth_image(capture);
    if (imageDEPTH == NULL)
    {
        printf("No depth.\n");
        k4a_capture_release(capture);
        return 1;
    }

    // Allocate a depth image at the color camera's resolution
    // (a stride of 0 lets the SDK pick the minimum stride for the format)
    k4a_image_t k4a_transformed_depth = NULL;
    k4a_image_create(
        K4A_IMAGE_FORMAT_DEPTH16,
        width_pixels,
        height_pixels,
        0,
        &k4a_transformed_depth);

    // Transform depth into the RGB camera's 2D image space
    k4a_transformation_depth_image_to_color_camera(transform, imageDEPTH, k4a_transformed_depth);

    // Copy to CImg
    auto depthImg = reinterpret_cast<uint16_t*>(k4a_image_get_buffer(k4a_transformed_depth));
    imgDEPTH = CImg<uint16_t>(depthImg, width_pixels, height_pixels);

    // Clean up the depth images and the capture
    k4a_image_release(imageDEPTH);
    k4a_image_release(k4a_transformed_depth);
    k4a_capture_release(capture);
    return 0;
}
int finish_kinect(k4a_device_t &device)
{
    k4a_device_stop_cameras(device);
    k4a_device_close(device);
    return 0;
}
int prepare_kinect(k4a_device_t &device, k4a_device_configuration_t &config, k4a_transformation_t& transform)
{
    uint32_t count = k4a_device_get_installed_count();
    if (count == 0)
    {
        printf("No k4a devices attached!\n");
        return 1;
    }

    // Open the first plugged-in Kinect device
    if (K4A_FAILED(k4a_device_open(K4A_DEVICE_DEFAULT, &device)))
    {
        printf("Failed to open k4a device!\n");
        return 1;
    }

    // Get the size of the serial number
    size_t serial_size = 0;
    k4a_device_get_serialnum(device, NULL, &serial_size);

    // Allocate memory for the serial, then acquire it
    char* serial = (char*)(malloc(serial_size));
    k4a_device_get_serialnum(device, serial, &serial_size);
    printf("Opened device: %s\n", serial);
    free(serial);

    // 2048x1536 BGRA color, wide field-of-view unbinned depth, 15 FPS
    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
    config.color_resolution = K4A_COLOR_RESOLUTION_1536P;
    config.depth_mode = K4A_DEPTH_MODE_WFOV_UNBINNED;
    config.camera_fps = K4A_FRAMES_PER_SECOND_15;
    config.synchronized_images_only = true;

    // Start the camera with the given configuration
    if (K4A_FAILED(k4a_device_start_cameras(device, &config)))
    {
        printf("Failed to start cameras!\n");
        k4a_device_close(device);
        return 1;
    }

    // Depth-to-color transformation from the device calibration
    k4a_calibration_t calibration;
    if (K4A_FAILED(k4a_device_get_calibration(device, config.depth_mode, config.color_resolution, &calibration)))
    {
        printf("Failed to get calibration!\n");
        k4a_device_close(device);
        return 1;
    }
    transform = k4a_transformation_create(&calibration);
    return 0;
}
/*======================
   Main
=======================*/
int main(int argc, char** argv)
{
    k4a_device_t device = NULL;
    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    k4a_transformation_t transform = NULL;
    if (prepare_kinect(device, config, transform)) return 1;

    // Buffers that hold RGB and depth (1536P color is 2048x1536)
    CImg<uint8_t> imgRGB(2048, 1536, 1, 3);
    CImg<uint16_t> imgDEPTH(2048, 1536, 1, 1);

    // Visualization windows
    CImgDisplay dispRGB(imgRGB, "RGB");
    dispRGB.resize(640, 480);
    dispRGB.move(100, 100);
    CImgDisplay dispDepth(imgDEPTH, "DEPTH");
    dispDepth.resize(640, 480);
    dispDepth.move(100 + 650, 100);
    dispDepth.set_normalization(0);

    // Main interaction loop: grab a frame and show it until the RGB window
    // is closed or Q/ESC is pressed.
    while (!dispRGB.is_closed() && !dispRGB.is_keyQ() && !dispRGB.is_keyESC()) {
        capture_kinect(device, transform, imgRGB, imgDEPTH);
        dispRGB.display(imgRGB);
        // Clamp depth to [0, 5000] mm and rescale to 8 bits for display
        dispDepth.display(CImg<uint8_t>(255.0f * CImg<float>(imgDEPTH.get_cut(0, 5000)) / 5000.0f));
        //imgRGB.get_normalize(0, 255).save_tiff("frameRGB.tif");
        //imgDEPTH.get_normalize(0, 255).save_tiff("frameDEPTH.tif");
    }

    finish_kinect(device);
    return 0;
}