-
-
Save roxlu/fb450aee77471a1d86f3 to your computer and use it in GitHub Desktop.
LibAV testing with mpegts
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include <stdlib.h> | |
#include <stdio.h> | |
#include <string.h> | |
#include <math.h> | |
#include "libavutil/mathematics.h" | |
#include "libavformat/avformat.h" | |
#include "libswscale/swscale.h" | |
#undef exit | |
/* 5 seconds stream duration */ | |
#define STREAM_DURATION 5.0 | |
#define STREAM_FRAME_RATE 25 /* 25 images/s */ | |
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE)) | |
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */ | |
/* Scaler flags (unused here unless a pixel-format conversion via swscale is added). */
static int sws_flags = SWS_BICUBIC;
/**************************************************************/
/* video output */
/* picture: frame handed to the encoder; tmp_picture: intermediate frame,
 * allocated only when the encoder pix_fmt differs from YUV420P.
 * Both allocated in open_video(), released in close_video(). */
static AVFrame *picture, *tmp_picture;
/* Scratch buffer receiving encoded packets (allocated in open_video()). */
static uint8_t *video_outbuf;
/* frame_count: frames encoded so far; video_outbuf_size: bytes in video_outbuf. */
static int frame_count, video_outbuf_size;
/* Add a video output stream. */ | |
/* Create a video stream in 'oc' for 'codec_id' and pre-configure its codec
 * context (bitrate, resolution, timebase, GOP size, pixel format).
 * Exits the process if the encoder or the stream cannot be created. */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVStream *stream;
    AVCodecContext *ctx;

    /* find the video encoder */
    AVCodec *enc = avcodec_find_encoder(codec_id);
    if (enc == NULL) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    stream = avformat_new_stream(oc, enc);
    if (stream == NULL) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
    ctx = stream->codec;

    /* Put sample parameters. */
    ctx->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    ctx->width  = 352;
    ctx->height = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    ctx->time_base.num = 1;
    ctx->time_base.den = STREAM_FRAME_RATE;
    ctx->gop_size = 12; /* emit one intra frame every twelve frames at most */
    ctx->pix_fmt  = STREAM_PIX_FMT;

    if (ctx->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        ctx->max_b_frames = 2;
    }
    if (ctx->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        ctx->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    return stream;
}
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height) | |
{ | |
AVFrame *picture; | |
uint8_t *picture_buf; | |
int size; | |
picture = avcodec_alloc_frame(); | |
if (!picture) | |
return NULL; | |
size = avpicture_get_size(pix_fmt, width, height); | |
picture_buf = av_malloc(size); | |
if (!picture_buf) { | |
av_free(picture); | |
return NULL; | |
} | |
avpicture_fill((AVPicture *)picture, picture_buf, | |
pix_fmt, width, height); | |
return picture; | |
} | |
/* Open the codec attached to 'st' and allocate the frame and packet
 * buffers used by write_video_frame().  Exits the process on failure,
 * matching the error style of the rest of this file. */
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    c = st->codec;
    /* open the codec.  NOTE(review): a NULL codec relies on the encoder
     * already being attached to the context by avformat_new_stream() —
     * confirm this holds for the libav version in use. */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }
    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* Fixed-size scratch buffer for encoded packets. */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
        if (!video_outbuf) { /* BUGFIX: allocation result was unchecked */
            fprintf(stderr, "Could not allocate video output buffer\n");
            exit(1);
        }
    }
    /* Allocate the encoded raw picture. */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }
    /* Intermediate frame, only needed when the encoder's pixel format
     * differs from YUV420P. */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
/* Prepare a dummy image. */ | |
static void fill_yuv_image(AVFrame *pict, int frame_index, | |
int width, int height) | |
{ | |
int x, y, i; | |
i = frame_index; | |
/* Y */ | |
for (y = 0; y < height; y++) | |
for (x = 0; x < width; x++) | |
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; | |
/* Cb and Cr */ | |
for (y = 0; y < height / 2; y++) { | |
for (x = 0; x < width / 2; x++) { | |
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; | |
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; | |
} | |
} | |
} | |
/* Encode one frame of the generated test pattern and write the resulting
 * packet to the muxer.  Exits the process on encode or write failure. */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size;
    int ret = 0; /* BUGFIX: was read uninitialized on the AVFMT_RAWPICTURE path (UB) */
    AVCodecContext *c;
    c = st->codec;
    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        fill_yuv_image(picture, frame_count, c->width, c->height);
    }
    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw-picture muxing is not implemented here; nothing is written. */
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf,
                                        video_outbuf_size, picture);
        if (out_size < 0) {
            /* BUGFIX: a negative return is an encoder error, not a
             * buffered frame — previously fell through as success. */
            fprintf(stderr, "Error while encoding video frame\n");
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);
            /* Rescale the encoder pts into the stream timebase. */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = video_outbuf;
            pkt.size = out_size;
            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
/* Tear down everything open_video() created: close the codec, then
 * release the packet buffer and the frame(s) with their pixel data. */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(video_outbuf);

    /* Frames from alloc_picture() own their data[0] buffer. */
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture != NULL) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
}
/**************************************************************/ | |
/* media file output */ | |
int main(int argc, char **argv) | |
{ | |
const char *filename; | |
AVOutputFormat *fmt; | |
AVFormatContext *oc; | |
AVStream *video_st; | |
double video_pts; | |
int i; | |
// setup | |
// ------------------------------- | |
av_register_all(); | |
if (argc != 2) { | |
printf("pass filename!\n"); | |
return 1; | |
} | |
filename = argv[1]; | |
fmt = av_guess_format("mpegts", NULL, NULL); | |
if (!fmt) { | |
printf("Could not deduce output format from file extension: using MPEG.\n"); | |
fmt = av_guess_format("mpeg", NULL, NULL); | |
} | |
if (!fmt) { | |
fprintf(stderr, "Could not find suitable output format\n"); | |
return 1; | |
} | |
// Create output format. | |
oc = avformat_alloc_context(); | |
if (!oc) { | |
fprintf(stderr, "Memory error\n"); | |
return 1; | |
} | |
oc->oformat = fmt; | |
const char* outfile = "tcp://127.0.0.1:9999"; | |
snprintf(oc->filename, sizeof(oc->filename), "%s", outfile); | |
video_st = NULL; | |
if (fmt->video_codec != CODEC_ID_NONE) { | |
//video_st = add_video_stream(oc, fmt->video_codec); | |
video_st = add_video_stream(oc, CODEC_ID_H264); | |
} | |
if (video_st) { | |
open_video(oc, video_st); | |
} | |
av_dump_format(oc, 0, outfile, 1); | |
// open output file if needed | |
if (!(fmt->flags & AVFMT_NOFILE)) { | |
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) { | |
fprintf(stderr, "Could not open '%s'\n", filename); | |
return 1; | |
} | |
} | |
// Create video: | |
// ------------------------------- | |
avformat_write_header(oc, NULL); | |
for (;;) { | |
if (video_st) { | |
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; | |
} | |
else { | |
video_pts = 0.0; | |
} | |
if ((!video_st || video_pts >= STREAM_DURATION)) { | |
break; | |
} | |
write_video_frame(oc, video_st); | |
} | |
av_write_trailer(oc); | |
// cleanup | |
// ------------------------------- | |
if (video_st) { | |
close_video(oc, video_st); | |
} | |
for (i = 0; i < oc->nb_streams; i++) { | |
av_freep(&oc->streams[i]->codec); | |
av_freep(&oc->streams[i]); | |
} | |
if (!(fmt->flags & AVFMT_NOFILE)) { | |
avio_close(oc->pb); | |
} | |
av_free(oc); | |
return 0; | |
} |
Author
roxlu
commented
Jul 23, 2012
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment