Modifications to the Agora on-premise recording SDK to enable AI applications
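//
// Note: the code below assumes a small helper container 'MyFrameYuv' and a
// per-user map 'framesYuv' added to the AgoraSdk class in the earlier steps
// (not included in this gist). A minimal sketch of what they might look like,
// inferred only from how they are used below:
//
//   struct MyFrameYuv {
//       uint64_t frame_ms_ = 0;                            // capture time in ms
//       uint32_t width_ = 0, height_ = 0;                  // visible frame size
//       uint32_t ystride_ = 0, ustride_ = 0, vstride_ = 0; // per-plane row strides
//       uint32_t bufSize_ = 0;                             // total YUV buffer size
//       std::vector<unsigned char> buf_;                   // copy of the raw YUV data
//   };
//
//   // member of AgoraSdk, e.g. declared in AgoraSdk.h:
//   std::map<std::string, MyFrameYuv> framesYuv;
//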
//
// Step 3: In the file 'AgoraSdk.cpp'
//
void AgoraSdk::videoFrameReceivedImpl(
        agora::linuxsdk::uid_t uid,
        const agora::linuxsdk::VideoFrame *pframe) const {
    char uidbuf[65];
    snprintf(uidbuf, sizeof(uidbuf), "%u", uid);
    const char *suffix = ".vtmp";

    //
    // HERE WE ADD NEW CODE
    //
    if (pframe->type == agora::linuxsdk::VIDEO_FRAME_RAW_YUV) {
        suffix = ".yuv";
        agora::linuxsdk::VideoYuvFrame *f = pframe->frame.yuv;

        // Copy the whole buffer and the frame details into my own data structure
        MyFrameYuv frame;
        frame.bufSize_ = f->bufSize_;
        frame.frame_ms_ = f->frame_ms_;
        frame.height_ = f->height_;
        frame.width_ = f->width_;
        frame.ystride_ = f->ystride_;
        frame.ustride_ = f->ustride_;
        frame.vstride_ = f->vstride_;
        frame.buf_.resize(frame.bufSize_);
        memcpy(&frame.buf_[0], f->buf_, f->bufSize_);

        // Copy to our modified recorder object, keyed by user ID
        std::string suid = std::to_string(uid);
        AgoraSdk *recorder = const_cast<AgoraSdk *>(this);
        recorder->framesYuv[suid] = frame;
        return;
    } else if (pframe->type == agora::linuxsdk::VIDEO_FRAME_JPG) {
        // Remaining parts of the original code...
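//
// The SDK delivers frames on its own worker thread while the main loop below
// reads 'framesYuv'. The gist does not show any synchronization, so if you see
// tearing or crashes you may want to guard the map with a mutex (a hypothetical
// 'framesMutex_' member, not part of the original modification), e.g.:
//
//   std::lock_guard<std::mutex> lock(recorder->framesMutex_);
//   recorder->framesYuv[suid] = frame;
//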
//
// On the main thread (used in the original 'main.cpp'), iterate over the
// frames stored for each user ID, convert them to BGR (or RGB), and display
// them with OpenCV's image viewer. At this point you have access to the raw
// video frames and can feed them into PyTorch or whatever AI model.
//
while (!recorder.stopped() && !g_bSignalStop) {
    if (g_bSignalStartService) {
        recorder.startService();
        g_bSignalStartService = false;
    }
    if (g_bSignalStopService) {
        recorder.stopService();
        g_bSignalStopService = false;
    }

    std::this_thread::sleep_for(std::chrono::milliseconds(32));

    for (auto &f : recorder.framesYuv) {
        auto uid = f.first;
        auto &frame = f.second;

        // Create a display window for this user the first time we see it
        if (cvGetWindowHandle((uid + "_bgr").c_str()) == nullptr) {
            cv::namedWindow(uid + "_bgr", cv::WINDOW_AUTOSIZE);
        }

        cv::Mat imageU, imageV, imageYUV;
        std::vector<cv::Mat> imageYUV_vec;
        cv::Mat imageBGR_padded, imageRGB_padded, imageBGR, imageRGB;

        // Extract the raw Y, U, V planes (each row padded to its stride)
        unsigned char *ybuff = &frame.buf_[0];
        cv::Mat imageY((int)frame.height_, (int)frame.ystride_, CV_8UC1, ybuff);

        unsigned int ybuff_length = frame.height_ * frame.ystride_;
        unsigned char *ubuff = ybuff + ybuff_length;
        cv::Mat imageU_src((int)frame.height_ / 2, (int)frame.ustride_, CV_8UC1, ubuff);

        unsigned int ubuff_length = (frame.height_ / 2) * frame.ustride_;
        unsigned char *vbuff = ubuff + ubuff_length;
        cv::Mat imageV_src((int)frame.height_ / 2, (int)frame.vstride_, CV_8UC1, vbuff);

        // Upsample U and V to the Y plane size
        cv::resize(imageU_src, imageU, cv::Size((int)frame.ystride_, (int)frame.height_), 0, 0, cv::INTER_LINEAR);
        cv::resize(imageV_src, imageV, cv::Size((int)frame.ystride_, (int)frame.height_), 0, 0, cv::INTER_LINEAR);

        // Merge the planes and convert to BGR
        imageYUV_vec.push_back(imageY);
        imageYUV_vec.push_back(imageU);
        imageYUV_vec.push_back(imageV);
        cv::merge(imageYUV_vec, imageYUV);
        cv::cvtColor(imageYUV, imageBGR_padded, cv::COLOR_YUV2BGR);
        cv::cvtColor(imageBGR_padded, imageRGB_padded, cv::COLOR_BGR2RGB);

        // Crop away the stride padding and keep an RGB copy as well
        imageBGR = imageBGR_padded(cv::Rect(0, 0, (int)frame.width_, (int)frame.height_));
        cv::cvtColor(imageBGR, imageRGB, cv::COLOR_BGR2RGB);

        // Display
        cv::imshow(uid + "_bgr", imageBGR);
        //cv::imwrite("test.jpg", imageBGR);
    }
}
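//
// As a minimal sketch of the "PyTorch or whatever AI model" step: with the
// LibTorch C++ API, the cropped RGB Mat above could be wrapped into a tensor
// and run through a TorchScript model. The model path 'model.pt' is
// hypothetical and not part of the original modification:
//
//   #include <torch/script.h>
//
//   torch::jit::script::Module module = torch::jit::load("model.pt");
//   // HWC uint8 -> NCHW float in [0, 1]
//   at::Tensor input = torch::from_blob(imageRGB.data,
//                                       {1, imageRGB.rows, imageRGB.cols, 3},
//                                       torch::kUInt8)
//                          .permute({0, 3, 1, 2})
//                          .to(torch::kFloat32)
//                          .div(255.0);
//   at::Tensor output = module.forward({input}).toTensor();
//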