Last active
October 30, 2016 16:33
-
-
Save elda27/a29f997aef5a3f9ba2ef9eaf14e8b0d4 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#pragma warning(disable:4996)
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include <libavutil/avutil.h>
#include <libavutil/time.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>
#include <vector>
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
//#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "swscale.lib")
//#pragma comment(lib, "swresample.lib")
//#pragma comment(lib, "postproc.lib")
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
# define av_frame_alloc avcodec_alloc_frame
# define av_frame_free avcodec_free_frame
#endif
// Dump one packed-RGB24 frame to disk as a binary PPM ("P6") image
// named "frame<iFrame>.ppm". Fails silently if the file cannot be opened.
//
// pFrame - frame whose data[0] plane holds RGB24 pixels
//          (as produced by sws_scale with AV_PIX_FMT_RGB24)
// width  - image width in pixels
// height - image height in pixels
// iFrame - index used to build the output filename
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    char szFilename[32];
    // snprintf (not sprintf) cannot overflow szFilename, even for
    // pathological frame indices.
    snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);

    FILE *pFile = fopen(szFilename, "wb");
    if (pFile == nullptr)
        return;

    // PPM header: magic number, dimensions, max channel value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write one row at a time: linesize[0] may include padding
    // beyond the width * 3 visible bytes, so rows are not contiguous.
    for (int y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}
int main(int argc, char *argv[]) | |
{ | |
// Initalizing these to nullptr prevents segfaults! | |
AVFormatContext *format_context = nullptr; | |
int i, videoStream; | |
AVCodecContext *codec_context_original = nullptr; | |
AVCodecContext *codec_context = nullptr; | |
AVCodec *codec = nullptr; | |
AVFrame *frame = nullptr; | |
AVFrame *frame_rgb = nullptr; | |
AVPacket packet = {}; | |
int frame_finished; | |
int num_bytes; | |
uint8_t *buffer = nullptr; | |
SwsContext *sws_context = nullptr; | |
if (argc < 2) | |
{ | |
printf("Please provide a movie file\n"); | |
return -1; | |
} | |
// Register all formats and codecs | |
av_register_all(); | |
av_log_set_level(1); | |
// Open video file | |
if (avformat_open_input(&format_context, argv[1], nullptr, nullptr) != 0) | |
return -1; // Couldn't open file | |
// Retrieve stream information | |
if (avformat_find_stream_info(format_context, nullptr)<0) | |
return -1; // Couldn't find stream information | |
// Dump information about file onto standard error | |
av_dump_format(format_context, 0, argv[1], 0); | |
// Find the first video stream | |
videoStream = -1; | |
for (i = 0; i<format_context->nb_streams; i++) | |
if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { | |
videoStream = i; | |
break; | |
} | |
if (videoStream == -1) | |
return -1; // Didn't find a video stream | |
// Get a pointer to the codec context for the video stream | |
codec_context_original = format_context->streams[videoStream]->codec; | |
// Find the decoder for the video stream | |
codec = avcodec_find_decoder(codec_context_original->codec_id); | |
if (codec == nullptr) { | |
fprintf(stderr, "Unsupported codec!\n"); | |
return -1; // Codec not found | |
} | |
// Copy context | |
codec_context = avcodec_alloc_context3(codec); | |
if (avcodec_copy_context(codec_context, codec_context_original) != 0) { | |
fprintf(stderr, "Couldn't copy codec context"); | |
return -1; // Error copying codec context | |
} | |
// Open codec | |
if (avcodec_open2(codec_context, codec, nullptr)<0) | |
return -1; // Could not open codec | |
// Allocate video frame | |
frame = av_frame_alloc(); | |
// Allocate an AVFrame structure | |
frame_rgb = av_frame_alloc(); | |
if (frame_rgb == nullptr) | |
return -1; | |
// Determine required buffer size and allocate buffer | |
num_bytes = avpicture_get_size(AV_PIX_FMT_RGB24, codec_context->width, | |
codec_context->height); | |
buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t)); | |
// Assign appropriate parts of buffer to image planes in pFrameRGB | |
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset | |
// of AVPicture | |
avpicture_fill((AVPicture *)frame_rgb, buffer, AV_PIX_FMT_RGB24, | |
codec_context->width, codec_context->height); | |
// initialize SWS context for software scaling | |
sws_context = sws_getContext(codec_context->width, | |
codec_context->height, | |
codec_context->pix_fmt, | |
codec_context->width, | |
codec_context->height, | |
AV_PIX_FMT_RGB24, | |
SWS_BILINEAR, | |
nullptr, | |
nullptr, | |
nullptr | |
); | |
// Read frames and save first five frames to disk | |
i = 1; | |
auto real_video_stream = format_context->streams[videoStream]; | |
auto fps = double(codec_context->framerate.num) / codec_context->framerate.den; | |
int max_frame = format_context->duration / AV_TIME_BASE / 60; | |
double step_time = 60.0; | |
//auto time_base = (int64_t(codec_context->time_base.num) * AV_TIME_BASE) / int64_t(codec_context->time_base.den); | |
while (av_read_frame(format_context, &packet) >= 0) | |
{ | |
auto video_stream = format_context->streams[videoStream]; | |
//std::int64_t seek_ts = (start_time* i * (real_video_stream->time_base.den)) / (real_video_stream->time_base.num); | |
std::int64_t seek_ts = step_time * i * (video_stream->time_base.den); | |
std::int64_t seek_target = (seek_ts) / (video_stream->time_base.num); | |
//std::int64_t frame_number = av_rescale(10000, format_context->streams[videoStream]->time_base.den, format_context->streams[videoStream]->time_base.num) / 1000; | |
//std::int64_t seek_target = std::int64_t(i * fps) * time_base; | |
if (av_seek_frame(format_context, videoStream, seek_target, AVSEEK_FLAG_ANY) < 0) | |
{ | |
std::cout << "av_seek_frame failed." << std::endl; | |
++i; | |
} | |
//avcodec_flush_buffers(codec_context); | |
// Is this a packet from the video stream? | |
if (packet.stream_index == videoStream) { | |
// Decode video frame | |
avcodec_decode_video2(codec_context, frame, &frame_finished, &packet); | |
// Did we get a video frame? | |
if (frame_finished) | |
{ | |
// Convert the image from its native format to RGB | |
sws_scale(sws_context, (uint8_t const * const *)frame->data, | |
frame->linesize, 0, codec_context->height, | |
frame_rgb->data, frame_rgb->linesize); | |
// Save the frame to disk | |
if (++i <= max_frame) | |
{ | |
SaveFrame(frame_rgb, codec_context->width, codec_context->height, i); | |
} | |
else | |
{ | |
break; | |
} | |
} | |
} | |
// Free the packet that was allocated by av_read_frame | |
av_free_packet(&packet); | |
} | |
// Free the RGB image | |
av_free(buffer); | |
av_frame_free(&frame_rgb); | |
// Free the YUV frame | |
av_frame_free(&frame); | |
// Close the codecs | |
avcodec_close(codec_context); | |
avcodec_close(codec_context_original); | |
// Close the video file | |
avformat_close_input(&format_context); | |
return 0; | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment