Created
May 31, 2014 20:37
-
-
Save w495/924e288ecae627d53640 to your computer and use it in GitHub Desktop.
filtering_video_ch.c
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * @file
 * Changed-API example of decoding a video stream and pushing it through
 * an avfilter graph.
 * @example doc/examples/filtering_video.c
 */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/timestamp.h>
/**
 * Variadic logging macros used for debugging.
 *
 * LOG(fmt, ...) prints a colorized
 *   "LOG: <file> <function> [<line>] : <message>"
 * line to stderr and appends a trailing newline automatically.
 */
#define LOG_HELPER(fmt, ...) \
    fprintf( \
        stderr, \
        "\033[32mLOG:\033[0m " \
        "\033[33m%s\033[0m " \
        "\033[36m%s\033[0m " \
        "[\033[1m%d\033[0m] : " fmt "%s", \
        __FILE__, \
        __func__, /* standard C99, instead of the GNU-only __FUNCTION__ */ \
        __LINE__, \
        __VA_ARGS__ \
    )
/* The extra "\n" argument lets LOG() be invoked with a bare format string
 * (fmt would otherwise have no matching variadic argument). */
#define LOG(...) LOG_HELPER(__VA_ARGS__, "\n")
/* Alternative filter chains kept for quick experimentation: */
// const char *filter_descr = "showinfo";
// const char *filter_descr = "select='gt(scene,0.4)',showinfo";
// const char *filter_descr = "scale=16:9,showinfo";
/* Active graph: downscale to 160x90 (fits a terminal for the ASCII
 * renderer in display_picref), then log per-frame info. */
const char *filter_descr = "scale=160:90,showinfo";
/* Aggregate of everything needed to decode one video stream and run it
 * through a filter graph. Created by state_create(), released by
 * state_free(). */
typedef struct{
    AVFormatContext *fmt_ctx;        /* demuxer context for the input file */
    AVCodecContext *dec_ctx;         /* decoder of the selected video stream */
    AVFilterContext *buffersink_ctx; /* sink ("out") end of the filter graph */
    AVFilterContext *buffersrc_ctx;  /* source ("in") end of the filter graph */
    AVFilterGraph *filter_graph;     /* the parsed/configured graph itself */
    AVFrame *frame;                  /* scratch frame allocated in state_create */
    int video_stream_index;          /* index of the chosen video stream */
    int ret;                         /* last libav* return code */
} state_t;
state_t* state_create();                                                    /* allocate state */
void state_free(state_t* this);                                             /* release state */
static int open_input_file(state_t *state, const char *filename);           /* open demuxer+decoder */
static int close_input_file(state_t *state);                                /* close demuxer+decoder */
static int init_filters(state_t *state, const char *filters_descr);         /* build filter graph */
static void display_picref(AVFilterBufferRef *picref, AVRational time_base);/* ASCII-render a frame */
int main(int argc, char **argv) | |
{ | |
int ret; | |
AVPacket packet; | |
AVFrame *frame = avcodec_alloc_frame(); | |
int got_frame; | |
if (!frame) { | |
perror("Could not allocate frame"); | |
exit(1); | |
} | |
if (argc != 2) { | |
fprintf(stderr, "Usage: %s file\n", argv[0]); | |
exit(1); | |
} | |
avcodec_register_all(); | |
av_register_all(); | |
avfilter_register_all(); | |
state_t* state = state_create(); | |
if ((ret = open_input_file(state, argv[1])) < 0) | |
goto end; | |
if ((ret = init_filters(state, filter_descr)) < 0) | |
goto end; | |
/* read all packets */ | |
while (1) { | |
AVFilterBufferRef *picref; | |
if ((ret = av_read_frame(state->fmt_ctx, &packet)) < 0) | |
break; | |
if (packet.stream_index == state->video_stream_index) { | |
avcodec_get_frame_defaults(frame); | |
got_frame = 0; | |
ret = avcodec_decode_video2(state->dec_ctx, frame, &got_frame, &packet); | |
if (ret < 0) { | |
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); | |
break; | |
} | |
if (got_frame) { | |
LOG("1"); | |
frame->pts = av_frame_get_best_effort_timestamp(frame); | |
LOG("2"); | |
/* push the decoded frame into the filtergraph */ | |
if (av_buffersrc_add_frame(state->buffersrc_ctx, frame, 0) < 0) { | |
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); | |
break; | |
} | |
LOG("3"); | |
/* pull filtered pictures from the filtergraph */ | |
while (1) { | |
LOG("3.1"); | |
ret = av_buffersink_get_buffer_ref(state->buffersink_ctx, &picref, 0); | |
LOG("3.2"); | |
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) | |
break; | |
if (ret < 0) | |
goto end; | |
if (picref) { | |
LOG("if (packet.stream_index == state->video_stream_index)"); | |
display_picref(picref, state->buffersink_ctx->inputs[0]->time_base); | |
avfilter_unref_bufferp(&picref); | |
} | |
} | |
} | |
} | |
av_free_packet(&packet); | |
} | |
end: | |
state_free(state); | |
if (ret < 0 && ret != AVERROR_EOF) { | |
char buf[1024]; | |
av_strerror(ret, buf, sizeof(buf)); | |
fprintf(stderr, "Error occurred: %s\n", buf); | |
exit(1); | |
} | |
exit(0); | |
} | |
state_t* state_create(){ | |
state_t* state = malloc(sizeof(state_t)); | |
state->frame = avcodec_alloc_frame(); | |
state->filter_graph = avfilter_graph_alloc(); | |
return state; | |
} | |
/**
 * Release all resources owned by a state object, then the object itself.
 * Safe to call with NULL.
 */
void state_free(state_t* this){
    if (!this)
        return;
    av_freep(&(this->frame));
    avfilter_graph_free(&(this->filter_graph));
    close_input_file(this);
    free(this);     /* the struct itself was previously leaked */
}
/**
 * Open the input container, locate the best video stream and open its
 * decoder.
 *
 * On success, state->video_stream_index and state->dec_ctx are set and
 * 0 is returned. On failure, a negative AVERROR code is returned; the
 * last libav* result is always mirrored into state->ret.
 */
static int open_input_file(state_t *state, const char *filename) {
    AVCodec *decoder;

    state->ret = avformat_open_input(&(state->fmt_ctx), filename, NULL, NULL);
    if (state->ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return state->ret;
    }

    state->ret = avformat_find_stream_info(state->fmt_ctx, NULL);
    if (state->ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return state->ret;
    }

    /* av_find_best_stream returns the stream index and also picks a decoder. */
    state->ret = av_find_best_stream(state->fmt_ctx, AVMEDIA_TYPE_VIDEO,
                                     -1, -1, &decoder, 0);
    if (state->ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return state->ret;
    }
    state->video_stream_index = state->ret;
    state->dec_ctx = state->fmt_ctx->streams[state->video_stream_index]->codec;

    /* Initialise the video decoder. */
    state->ret = avcodec_open2(state->dec_ctx, decoder, NULL);
    if (state->ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return state->ret;
    }

    state->ret = 0;
    return state->ret;
}
/**
 * Close the decoder and the input container.
 *
 * NULL-safe and idempotent: dec_ctx is cleared after closing so a
 * repeated call cannot close it twice, and avformat_close_input()
 * resets fmt_ctx to NULL itself.
 *
 * @return 1 (kept for compatibility with existing callers).
 */
static int close_input_file(state_t *state) {
    if (!state)     /* was: NULL dereference */
        return 1;
    if (state->dec_ctx) {
        avcodec_close(state->dec_ctx);
        state->dec_ctx = NULL;  /* guard against double close */
    }
    if (state->fmt_ctx)
        avformat_close_input(&(state->fmt_ctx));
    return 1;
}
/**
 * Build and configure the filter graph
 *   buffer ("in") -> <filters_descr> -> ffbuffersink ("out"),
 * storing the endpoint contexts in state->buffersrc_ctx /
 * state->buffersink_ctx.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 *
 * Fix vs. original: the AVFilterInOut lists were leaked on every early
 * return and on success; they are now released via avfilter_inout_free()
 * on all paths (same pattern as the upstream filtering_video.c example).
 * Allocation results are also checked.
 */
static int init_filters(state_t *state, const char *filters_descr) {
    char args[512];
    int ret;
    AVFilter *buffersrc = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24 /*AV_PIX_FMT_GRAY8*/, AV_PIX_FMT_NONE };
    AVBufferSinkParams *buffersink_params;

    if (!outputs || !inputs) {      /* was unchecked */
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Buffer video source: the decoded frames from the decoder are inserted here.
     * The source must know the exact frame geometry/format in advance. */
    snprintf(
        args,
        sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        state->dec_ctx->width,
        state->dec_ctx->height,
        state->dec_ctx->pix_fmt,
        state->fmt_ctx->streams[state->video_stream_index]->time_base.num,
        state->fmt_ctx->streams[state->video_stream_index]->time_base.den,
        state->dec_ctx->sample_aspect_ratio.num,
        state->dec_ctx->sample_aspect_ratio.den
    );
    ret = avfilter_graph_create_filter(&(state->buffersrc_ctx), buffersrc, "in",
                                       args, NULL, state->filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }

    /* Buffer video sink: terminates the filter chain; restricts output
     * to the pixel formats listed in pix_fmts. */
    buffersink_params = av_buffersink_params_alloc();
    if (!buffersink_params) {       /* was unchecked */
        ret = AVERROR(ENOMEM);
        goto end;
    }
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&(state->buffersink_ctx), buffersink, "out",
                                       NULL, buffersink_params, state->filter_graph);
    av_free(buffersink_params);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    /* Endpoints for the filter graph: link "in"/"out" labels in the
     * textual description to the src/sink contexts created above. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = state->buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = state->buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse(state->filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0)
        goto end;
    ret = avfilter_graph_config(state->filter_graph, NULL);

end:
    avfilter_inout_free(&inputs);   /* were leaked on every path */
    avfilter_inout_free(&outputs);
    return ret;
}
/* Map an AV_NOPTS_VALUE timestamp to NaN, otherwise to double. (Currently unused.) */
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))

/**
 * Render one filtered picture as ASCII art on stdout: the terminal is
 * reset, then each byte of plane 0 is mapped to a character by
 * brightness (16 levels).
 *
 * @param picref    filtered buffer reference from the buffersink
 * @param time_base time base used to convert picref->pts to seconds
 */
static void display_picref(AVFilterBufferRef *picref, AVRational time_base) {
    int x, y;
    uint8_t *p0, *p;
    double pts_time = picref->pts * av_q2d(time_base);
    /* pts/pos are int64_t: use PRId64, not %ld (wrong width on LP32/Windows,
     * which is undefined behavior). Also drop the explicit "\n" — LOG()
     * already appends one, so the original printed a double newline. */
    LOG("pts = %" PRId64 " pts_time = %f pos = %" PRId64,
        picref->pts, pts_time, picref->pos);
    p0 = picref->data[0];
    puts("\033c");  /* ESC c: reset terminal before redrawing the frame */
    for (y = 0; y < picref->video->h; y++) {
        p = p0;
        /* NOTE(review): with AV_PIX_FMT_RGB24 each pixel is 3 bytes, so this
         * walks only the first w bytes of each row; the mapping looks written
         * for GRAY8 (see the commented alternative in pix_fmts) — confirm the
         * intended pixel format. */
        for (x = 0; x < picref->video->w; x++){
            putchar("#$&%?+^~-_,.\"'` "[*(p++) / 16]);
        }
        putchar('\n');
        p0 += picref->linesize[0];
    }
    fflush(stdout);
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment