-
-
Save yohhoy/52b31522dbb751e5296e to your computer and use it in GitHub Desktop.
/*
 * Convert from OpenCV image and write movie with FFmpeg
 *
 * Copyright (c) 2016 yohhoy
 */
#include <iostream> | |
#include <vector> | |
// FFmpeg | |
extern "C" { | |
#include <libavformat/avformat.h> | |
#include <libavcodec/avcodec.h> | |
#include <libavutil/avutil.h> | |
#include <libavutil/pixdesc.h> | |
#include <libswscale/swscale.h> | |
} | |
// OpenCV | |
#include <opencv2/opencv.hpp> | |
#include <opencv2/highgui.hpp> | |
int main(int argc, char* argv[]) | |
{ | |
if (argc < 2) { | |
std::cout << "Usage: cv2ff <outfile>" << std::endl; | |
return 1; | |
} | |
const char* outfile = argv[1]; | |
// initialize FFmpeg library | |
av_register_all(); | |
// av_log_set_level(AV_LOG_DEBUG); | |
int ret; | |
const int dst_width = 640; | |
const int dst_height = 480; | |
const AVRational dst_fps = {30, 1}; | |
// initialize OpenCV capture as input frame generator | |
cv::VideoCapture cvcap(0); | |
if (!cvcap.isOpened()) { | |
std::cerr << "fail to open cv::VideoCapture"; | |
return 2; | |
} | |
cvcap.set(cv::CAP_PROP_FRAME_WIDTH, dst_width); | |
cvcap.set(cv::CAP_PROP_FRAME_HEIGHT, dst_height); | |
// allocate cv::Mat with extra bytes (required by AVFrame::data) | |
std::vector<uint8_t> imgbuf(dst_height * dst_width * 3 + 16); | |
cv::Mat image(dst_height, dst_width, CV_8UC3, imgbuf.data(), dst_width * 3); | |
// open output format context | |
AVFormatContext* outctx = nullptr; | |
ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile); | |
if (ret < 0) { | |
std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret; | |
return 2; | |
} | |
// open output IO context | |
ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr); | |
if (ret < 0) { | |
std::cerr << "fail to avio_open2: ret=" << ret; | |
return 2; | |
} | |
// create new video stream | |
AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec); | |
AVStream* vstrm = avformat_new_stream(outctx, vcodec); | |
if (!vstrm) { | |
std::cerr << "fail to avformat_new_stream"; | |
return 2; | |
} | |
avcodec_get_context_defaults3(vstrm->codec, vcodec); | |
vstrm->codec->width = dst_width; | |
vstrm->codec->height = dst_height; | |
vstrm->codec->pix_fmt = vcodec->pix_fmts[0]; | |
vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps); | |
vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps; | |
if (outctx->oformat->flags & AVFMT_GLOBALHEADER) | |
vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; | |
// open video encoder | |
ret = avcodec_open2(vstrm->codec, vcodec, nullptr); | |
if (ret < 0) { | |
std::cerr << "fail to avcodec_open2: ret=" << ret; | |
return 2; | |
} | |
std::cout | |
<< "outfile: " << outfile << "\n" | |
<< "format: " << outctx->oformat->name << "\n" | |
<< "vcodec: " << vcodec->name << "\n" | |
<< "size: " << dst_width << 'x' << dst_height << "\n" | |
<< "fps: " << av_q2d(dst_fps) << "\n" | |
<< "pixfmt: " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n" | |
<< std::flush; | |
// initialize sample scaler | |
SwsContext* swsctx = sws_getCachedContext( | |
nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24, | |
dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr); | |
if (!swsctx) { | |
std::cerr << "fail to sws_getCachedContext"; | |
return 2; | |
} | |
// allocate frame buffer for encoding | |
AVFrame* frame = av_frame_alloc(); | |
std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height)); | |
avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height); | |
frame->width = dst_width; | |
frame->height = dst_height; | |
frame->format = static_cast<int>(vstrm->codec->pix_fmt); | |
// encoding loop | |
avformat_write_header(outctx, nullptr); | |
int64_t frame_pts = 0; | |
unsigned nb_frames = 0; | |
bool end_of_stream = false; | |
int got_pkt = 0; | |
do { | |
if (!end_of_stream) { | |
// retrieve source image | |
cvcap >> image; | |
cv::imshow("press ESC to exit", image); | |
if (cv::waitKey(33) == 0x1b) | |
end_of_stream = true; | |
} | |
if (!end_of_stream) { | |
// convert cv::Mat(OpenCV) to AVFrame(FFmpeg) | |
const int stride[] = { static_cast<int>(image.step[0]) }; | |
sws_scale(swsctx, &image.data, stride, 0, image.rows, frame->data, frame->linesize); | |
frame->pts = frame_pts++; | |
} | |
// encode video frame | |
AVPacket pkt; | |
pkt.data = nullptr; | |
pkt.size = 0; | |
av_init_packet(&pkt); | |
ret = avcodec_encode_video2(vstrm->codec, &pkt, end_of_stream ? nullptr : frame, &got_pkt); | |
if (ret < 0) { | |
std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n"; | |
break; | |
} | |
if (got_pkt) { | |
// rescale packet timestamp | |
pkt.duration = 1; | |
av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base); | |
// write packet | |
av_write_frame(outctx, &pkt); | |
std::cout << nb_frames << '\r' << std::flush; // dump progress | |
++nb_frames; | |
} | |
av_free_packet(&pkt); | |
} while (!end_of_stream || got_pkt); | |
av_write_trailer(outctx); | |
std::cout << nb_frames << " frames encoded" << std::endl; | |
av_frame_free(&frame); | |
avcodec_close(vstrm->codec); | |
avio_close(outctx->pb); | |
avformat_free_context(outctx); | |
return 0; | |
} |
Great stuff!
Exactly what I was looking for :)
Hey, I am working on a project that produces raw image data from a USB IR camera. Unfortunately, it's not possible to detect the camera easily as a webcam, which is why I only want to work with the raw data. That may mean I need a shorter version of your code. In my code I have the following: cv::Mat mat(288, 384, CV_8UC3, ppm);
, where ppm stands for the PPM matrix with all the raw data. I also added a frame counter which produces new images at a given interval. My question now is: Is there a direct way to produce the video file? In the end, the image data should be converted to a stream and finally sent as a network stream. I am working with avio_write(client, buf, n)
to send the buffer data to the client. Is there any possible modification of your code to send the frame buffer to the client with my given function?
Thanks for all.
;-) Cheers
@zweipunktnull Are you looking for avio_open_dyn_buf
to retrieve encoded byte stream on memory?
Thanks
Master Yohhoy,
The code worked perfectly, and now I want to send the stream to the ffserver. Unless I misunderstood the code, the stream is generated at av_write_trailer, that is out of the capturing/ encoding loop.
Do you recommend I study your code and make the modifications or the paradigm of live streaming is totally different? I'm new to this.
Thanks!
Hi, I am trying to reuse this code to write images that I generated from OpenCV into a video file; however, the program fails with SIGSEGV inside avcodec_encode_video2. I found online that it could probably come from a wrong stride value, but I still can't fix this.
Any idea where this could come from ?
@PatrickJTemasys I have this same problem. Did you manage to fix it?
hi do you have a cmake file? which libs did you link to this project?
Perfect dude, it helps me a lot! Finally finished my realsense recorder!
anyone knows how can i send my opencv image using the rtsp protocol to another computer?
i always get connection refused in function
avformat_write_header(outctx, nullptr);
How to compile this? Bros
Hi, this is the build command for later readers (on Jetson Nano, JetPack 4.3):
g++ -std=c++11 $(pkg-config --cflags opencv4) $(pkg-config --libs opencv4) ffmpeg_writer.cpp -o ffmpeg_writer -lstdc++ -lpthread -lopencv_core -lopencv_highgui -lopencv_videoio -lavformat -lavcodec -lavutil -lswscale
Hi........ Thanks a lot for your code.
It works great.
I need some more help: I was trying to add a new data stream, but whenever I try to write the data stream it crashes at free(pkt.data).
I don't know what I am doing wrong.
I just want to add one more addition Data stream with your video stream.
I appreciate lot, if I get some help from.
Thanks in advance.
Seeing a lot of errors : in the below part of the code.
vstrm->codec->width = dst_width;
vstrm->codec->height = dst_height;
vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
It says codec is not a part of vstrm
In 2021, this snippet uses many deprecated APIs because of FFmpeg breaking changes.
I updated the source code for the latest OpenCV 4.5 and FFmpeg 4.4 libraries.
https://gist.github.com/yohhoy/37f010d1fb8b3eb8b06a8669177d05f7
http://qiita.com/yohhoy/items/50c6771168a91e8d0367 (ja)