Skip to content

Instantly share code, notes, and snippets.

@LinuxwitChdoCtOr
Created September 9, 2016 21:49
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 3 You must be signed in to fork a gist
  • Save LinuxwitChdoCtOr/1fbfc44443de8f0fcf6a742f5562376d to your computer and use it in GitHub Desktop.
#include "AvEncoder.h"
#include <iostream>
#include <string>
#include <queue>
#include <functional>
#include <cstdint>
extern "C"
{
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavcodec/avcodec.h>
};
/**
* Put a description of the AVERROR code errnum in errbuf.
* In case of failure the global variable errno is set to indicate the
* error. Even in case of failure av_strerror() will print a generic
* error message indicating the errnum provided to errbuf.
*
* @param errnum error code to describe
* @param errbuf buffer to which description is written
* @param errbuf_size the size in bytes of errbuf
* @return 0 on success, a negative value if a description for errnum
* cannot be found
*
int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
*/
#define STR(tok) #tok
#define ERRBUF_SIZE 256
/// Scratch buffer for av_strerror() text plus the last captured errno value.
static char g_errbuf[ERRBUF_SIZE];
static int g_errno = 0;
/// Log a libav failure prefixed by message x, described via the current errno.
/// BUG FIX: wrapped in do { } while (0) so the macro expands to a single
/// statement and is safe inside un-braced if/else (the old multi-statement
/// form silently executed its tail unconditionally).
/// BUG FIX: av_strerror() expects an AVERROR code, not a positive POSIX
/// errno; AVERROR(errno) performs the conversion.
#define PRINT_AV_ERROR(x) \
do { \
    if ( errno ) g_errno = errno; \
    av_strerror( AVERROR(errno), g_errbuf, ERRBUF_SIZE ); \
    if ( g_errno ) \
        std::cerr << x << STR(:) << g_errbuf << std::endl; \
    else \
        std::cerr << x << STR(:UNKNOWN ERROR errno was NOT set ) << std::endl; \
    g_errno = 0; \
} while ( 0 )
/// Format an AVERROR code x into the shared error buffer and return it.
#define AVERR_STR(x) av_make_error_string( g_errbuf, ERRBUF_SIZE, (x) )
/// Queue one RGB24 frame for the encoder thread (producer side).
/// @param RGBFrame packed RGB24 pixel buffer; Encode() later points the
///        source frame directly at it, so it must remain valid until encoded.
/// @return the encoder status (AVE_*) as last published by the Run() loop.
int AvEncoder::SendNewFrame( uint8_t * RGBFrame )
{
PushFrame( RGBFrame ); // thread-safe: PushFrame takes m_frame_queue_mtx
return m_status;
} // END int AvEncoder::SendNewFrame( uint8_t * RGBFrame )
/// Encoder thread entry point: pop queued frames and encode them until
/// Close() clears m_running, then drain the backlog, flush the encoder,
/// finalize the file and exit the thread.
void AvEncoder::Run()
{
uint8_t * frame = NULL;
m_running = true;
while( m_running )
{
if ( !m_frame_queue.empty() )
{
m_status = AVE_BUSY_STATUS;
frame = PopFrame();
/// Diagnostic: the producer is expected to stay at most one frame ahead.
/// NOTE(review): empty()/size() are read here without the queue mutex —
/// harmless for a log line but racy; confirm that is intentional.
if ( frame && !m_frame_queue.empty() ) std::cerr << "ERROR:AvEncoder:run: ! m_frame_queue.empty() NOT EMPTY = " << m_frame_queue.size() << std::endl;
}
else
{
frame = NULL;
}
if ( frame != NULL )
{
m_status = Encode(frame);
frame = NULL;
}
else
{
m_status = AVE_IDLE_STATUS;
usleep( 1000 ); // sleep 1 ms between polls instead of hard busy-waiting
}
}
// Process remaining frames queued after Close() was requested
while ( !m_frame_queue.empty() )
{
std::cerr << " ::AvEncoder:Run: encode backlog frames " << m_frame_queue.size() << std::endl;
m_status = Encode( PopFrame() );
usleep( 5000 ); // I have no clue how to check the encoder
}
std::cerr << " ::AvEncoder:Run: encoder going to FLUSH MODE :" << std::endl;
/// Drain the encoder, write the container trailer, release libav resources.
FlushMode();
pthread_exit( (void *) EXIT_SUCCESS );
} /// END void AvEncoder::run()
// #define DEBUG_THE_TIME_STREAM
int AvEncoder::Open( std::string fname, unsigned int w_, unsigned int h_, enum AVCodecID codec_id, int fps, int gop, int bps )
{
int rtn_val = AVE_IDLE_STATUS;
unsigned int tag = 0;
AVDictionary * av_dict_opts = NULL;
AVStream * video_st = NULL;
m_av_encode_codec = avcodec_find_encoder( codec_id );
if ( !m_av_encode_codec )
{
PRINT_AV_ERROR( "ERROR:avcodec_find_encoder:" );
return AVE_UNKNOW_LIB_ERROR;
}
/**
* Allocate an AVFormatContext for an output format.
* avformat_free_context() can be used to free the context and
* everything allocated by the framework within it.
*
* @param *ctx is set to the created format context, or to NULL in
* case of failure
* @param oformat format to use for allocating the context, if NULL
* format_name and filename are used instead
* @param format_name the name of output format to use for allocating the
* context, if NULL filename is used instead
* @param filename the name of the filename to use for allocating the
* context, may be NULL
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure
*
* int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,
* const char *format_name, const char *filename);
*/
avformat_alloc_output_context2( &m_av_out_fmt_ctx, NULL, NULL, fname.c_str() ); //(char*)"mkv", fname.c_str() );
// avformat_alloc_output_context2( &m_av_out_fmt_ctx, NULL, "mpegts", fname.c_str() ); //(char*)"mkv", fname.c_str() );
if ( !m_av_out_fmt_ctx )
{
PRINT_AV_ERROR( "ERROR:avformat_alloc_output_context2:" );
return AVE_UNKNOW_LIB_ERROR;
}
/**
* Allocate an AVCodecContext and set its fields to default values. The
* resulting struct should be freed with avcodec_free_context().
*
* @param codec if non-NULL, allocate private data and initialize defaults
* for the given codec. It is illegal to then call avcodec_open2()
* with a different codec.
* If NULL, then the codec-specific defaults won't be initialized,
* which may result in suboptimal default settings (this is
* important mainly for encoders, e.g. libx264).
*
* @return An AVCodecContext filled with default values or NULL on failure.
*
* AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
*/
m_av_codec_ctx = avcodec_alloc_context3( m_av_encode_codec );
if ( !m_av_codec_ctx )
{
PRINT_AV_ERROR( "ERROR:avcodec_alloc_context3:" );
return AVE_UNKNOW_LIB_ERROR;
}
m_av_codec_ctx->codec_id = codec_id;
// m_av_codec_ctx->bit_rate = bps;
(void)bps;
m_av_codec_ctx->width = w_; /// \note multiple of 2
m_av_codec_ctx->height = h_; /// \note multiple of 2
m_av_codec_ctx->time_base = (AVRational) { 1, fps }; /// 1000000 }; (void)fps; // }; //* 2;
m_av_codec_ctx->gop_size = gop; // Intra frames per x P frames
(void)gop;
m_av_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P; // MUST DO NOT CHANGE nvenc required
m_av_codec_ctx->max_b_frames = 0;
/// encoding nvenc profiles
av_dict_set( &av_dict_opts, "preset", "lossless", 0 ); /// hq ll lossless losslesshq
av_dict_set( &av_dict_opts, "profile", "high", 0 ); /// high444p
av_dict_set( &av_dict_opts, "level", "4.1", 0 );
av_dict_set( &av_dict_opts, "rc", "vbr", 0 ); /// constqp vbr cbr ll_2pass_quality ll_2pass_size vbr_2pass vbr_minqp
// av_dict_set( &av_dict_opts, "rc", "vbr_2pass", 0 ); /// extra frames in encoding
av_dict_set( &av_dict_opts, "gpu", "1", 0 );
//av_dict_set( &av_dict_opts, "surfaces", "100", 0 );
// av_dict_set( &av_dict_opts, "2pass", "1", 0 ); /// extra frames in encoding
// av_dict_set( &av_dict_opts, "threads", "0", 0 );
// av_dict_set( &av_dict_opts, "threads", "4", 0 );
/// non specific
av_dict_set( &av_dict_opts, "b", "5M", 0 );
av_dict_set( &av_dict_opts, "qmin", "1", 0 );
av_dict_set( &av_dict_opts, "qmax", "20", 0 );
// av_dict_set( &av_dict_opts, "g", "15", 0 );
av_dict_set( &av_dict_opts, "delay", "0", 0 );
// av_dict_set( &av_dict_opts, "framerate", "60", 0 );
/**
* Add a new stream to a media file.
*
* When demuxing, it is called by the demuxer in read_header(). If the
* flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also
* be called in read_packet().
*
* When muxing, should be called by the user before avformat_write_header().
*
* User is required to call avcodec_close() and avformat_free_context() to
* clean up the allocation by avformat_new_stream().
*
* @param s media file handle
* @param c If non-NULL, the AVCodecContext corresponding to the new stream
* will be initialized to use this codec. This is needed for e.g. codec-specific
* defaults to be set, so codec should be provided if it is known.
*
* @return newly created stream or NULL on error.
*
* AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
*/
video_st = avformat_new_stream( m_av_out_fmt_ctx, m_av_encode_codec );
if ( !video_st )
{
PRINT_AV_ERROR( "ERROR:avformat_new_stream:" );
return AVE_UNKNOW_LIB_ERROR;
}
if ( av_codec_get_tag2( m_av_out_fmt_ctx->oformat->codec_tag, AV_CODEC_ID_H264, &tag ) == 0 )
{
av_log( NULL, AV_LOG_ERROR, "could not find codec tag for codec id %d, default to 0.\n", m_av_encode_codec->id );
}
video_st->codecpar->codec_tag = tag;
video_st->time_base = AVRational{ 1, fps };
av_stream_set_r_frame_rate( video_st, AVRational{ fps, 1 } );
/**
* Fill the parameters struct based on the values from the supplied codec
* context. Any allocated fields in par are freed and replaced with duplicates
* of the corresponding fields in codec.
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
if ( avcodec_parameters_from_context( video_st->codecpar, m_av_codec_ctx ) < 0 )
{
PRINT_AV_ERROR( "ERROR:avcodec_parameters_from_context:" );
return rtn_val;
}
/**
* Initialize the AVCodecContext to use the given AVCodec. Prior to using this
* function the context has to be allocated with avcodec_alloc_context3().
*
* The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
* avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
* retrieving a codec.
*
* @warning This function is not thread safe!
*
* @note Always call this function before using decoding routines (such as
* @ref avcodec_receive_frame()).
*
* @code
* avcodec_register_all();
* av_dict_set(&opts, "b", "2.5M", 0);
* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
* if (!codec)
* exit(1);
*
* context = avcodec_alloc_context3(codec);
*
* if (avcodec_open2(context, codec, opts) < 0)
* exit(1);
* @endcode
*
* @param avctx The context to initialize.
* @param codec The codec to open this context for. If a non-NULL codec has been
* previously passed to avcodec_alloc_context3() or
* for this context, then this parameter MUST be either NULL or
* equal to the previously passed codec.
* @param options A dictionary filled with AVCodecContext and codec-private options.
* On return this object will be filled with options that were not found.
*
* @return zero on success, a negative value on error
* @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
* av_dict_set(), av_opt_find().
*
* int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
*/
rtn_val = avcodec_open2( m_av_codec_ctx, m_av_encode_codec, &av_dict_opts );
if ( rtn_val < 0 )
{
PRINT_AV_ERROR( "ERROR:avcodec_open2:" );
return rtn_val;
}
if ( m_av_out_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER )
m_av_out_fmt_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
av_dump_format( m_av_out_fmt_ctx, 0, fname.c_str(), 1 );
if ( !( m_av_out_fmt_ctx->oformat->flags & AVFMT_NOFILE ) )
{
rtn_val = avio_open( &m_av_out_fmt_ctx->pb, fname.c_str(), AVIO_FLAG_WRITE );
if ( rtn_val < 0 )
{
PRINT_AV_ERROR( "ERROR:avio_open:" );
return rtn_val;
}
}
rtn_val = avformat_write_header( m_av_out_fmt_ctx, &av_dict_opts );
if ( rtn_val < 0 )
{
PRINT_AV_ERROR( "ERROR:avformat_write_header:" );
return rtn_val;
}
av_init_packet( &m_av_packet );
m_av_packet.data = NULL; // packet data will be allocated by the encoder
m_av_packet.size = 0;
return SetupFrameBuffers( w_, h_ );
}
/// Convert one RGB24 frame to YUV420P, flip it vertically, feed it to the
/// encoder and mux every packet the encoder returns.
/// @param RGBFrame packed RGB24 buffer, or NULL to just service the encoder
/// @return 0/AVERROR(EAGAIN)-style codes from the receive loop, or
///         AVE_UNKNOW_LIB_ERROR if avcodec_send_frame() fails hard
int AvEncoder::Encode( uint8_t * RGBFrame )
{
#if defined(DEBUG_THE_TIME_STREAM)
static int print_counter = 0;
#endif
int rtn_val = 0;
if ( RGBFrame )
{
/// Zero-copy: point the source frame's plane 0 at the caller's buffer
/// instead of memcpy-ing it (see commented alternative).
/// NOTE(review): assumes the caller's buffer stride equals
/// m_src_frame->linesize[0] — verify against the producer.
/// memcpy( m_src_frame->data[0], RGBFrame, m_rgb_size );
m_src_frame->data[0] = RGBFrame;
/// Convert the full RGB24 image into the YUV420P destination frame.
sws_scale ( m_sws_ctx,
(uint8_t const * const *)m_src_frame->data,
m_src_frame->linesize,
0,
m_src_frame->height,
m_dst_frame->data,
m_dst_frame->linesize
);
/// Flip frame because RGB openGL bottom left instead of top left:
/// point each plane at its last row and negate its stride. Plane 0 (luma)
/// is full height; chroma planes of YUV420P are half height (height >> 1).
/// NOTE(review): this mutates m_dst_frame in place on EVERY call, so the
/// pointer/stride state alternates between calls (flipped layout one call,
/// negative-stride scaling the next). The output appears flipped either
/// way, but the scheme is fragile — confirm before reordering anything.
for ( int i = 0; i < 4; i++ )
{
if ( i ) m_dst_frame->data[i] += m_dst_frame->linesize[i] * ((m_dst_frame->height >> 1)-1);
else m_dst_frame->data[i] += m_dst_frame->linesize[i] * (m_dst_frame->height-1);
m_dst_frame->linesize[i] = -m_dst_frame->linesize[i];
}
}
/// Diagnostic only — m_dst_frame is still dereferenced just below if NULL.
if ( m_dst_frame == NULL ) std::cerr << " +++++ :AvEncoder:Encode:avcodec_send_frame: m_dst_frame == NULL" << std::endl;
/// Monotonic pts in codec time_base (1/fps) units, taken from the codec's
/// running frame counter.
m_dst_frame->pts = m_av_codec_ctx->frame_number;
/// Queue the frame; AVERROR_EOF (encoder already flushing) is tolerated,
/// any other failure aborts this frame.
rtn_val = avcodec_send_frame( m_av_codec_ctx, m_dst_frame );
if ( ( rtn_val < 0 ) && ( rtn_val != AVERROR_EOF ) )
{
PRINT_AV_ERROR( "ERROR::AvEncoder:Encode:avcodec_send_frame:" );
return AVE_UNKNOW_LIB_ERROR;
}
m_frame_count++;
/// Drain every packet the encoder has ready; the loop ends on
/// AVERROR(EAGAIN) (needs more input) or a real error.
do
{
rtn_val = avcodec_receive_packet( m_av_codec_ctx, &m_av_packet );
#if defined(DEBUG_THE_TIME_STREAM)
if ( m_frame_count > 30 && print_counter < 10 )
std::cout << " ::AvEncoder:Encode: frame = " << m_frame_count
<< " streams[ 0 ] time_base = " << m_av_out_fmt_ctx->streams[ 0 ]->time_base.num << " dem " << m_av_out_fmt_ctx->streams[ 0 ]->time_base.den
<< " nb_frames = " << m_av_out_fmt_ctx->streams[ 0 ]->nb_frames
<< " cur_dts = " << m_av_out_fmt_ctx->streams[ 0 ]->cur_dts
<< std::endl;
#endif
if ( rtn_val == 0 )
{
/// Rescale timestamps from codec time_base (1/fps) to the stream's
/// time_base before muxing.
if ( m_av_packet.pts != AV_NOPTS_VALUE )
m_av_packet.pts = av_rescale_q( m_av_packet.pts, m_av_codec_ctx->time_base, m_av_out_fmt_ctx->streams[ 0 ]->time_base );
if ( m_av_packet.dts != AV_NOPTS_VALUE )
m_av_packet.dts = av_rescale_q( m_av_packet.dts, m_av_codec_ctx->time_base, m_av_out_fmt_ctx->streams[ 0 ]->time_base );
/// Progress trace every 10th frame.
if ( ( m_frame_count % 10 ) == 0 )
std::cout << " ::AvEncoder:Encode: frame = " << m_frame_count
<< " packet: pts = " << m_av_packet.pts
<< " packet: dts = " << m_av_packet.dts
<< std::endl;
/// av_interleaved_write_frame() takes ownership of the packet data and
/// frees it even when it fails.
rtn_val = av_interleaved_write_frame( m_av_out_fmt_ctx, &m_av_packet );
if ( rtn_val < 0 )
{
std::cerr << "ERROR::AvEncoder:Encode:av_interleaved_write_frame:" << AVERR_STR( rtn_val ) << std::endl;
}
}
else if ( ( rtn_val < 0 ) && ( rtn_val != AVERROR(EAGAIN) ) )
{
std::cerr << "ERROR::AvEncoder:Encode:avcodec_receive_packet:" << AVERR_STR( rtn_val ) << std::endl;
PRINT_AV_ERROR( "ERROR::AvEncoder:Encode:avcodec_receive_packet: NOT EAGAIN :" );
}
} while ( rtn_val == 0 );
return rtn_val;
} // END void AvEncoder::Encode( uint8_t * RGBFrame )
/// Allocate the RGB source frame, the YUV destination frame and the
/// swscale conversion context. Called from Open() as the final setup step.
/// @param w_, h_ frame dimensions in pixels
/// @return >= 0 on success, a negative AVERROR or AVE_UNKNOW_LIB_ERROR code
int AvEncoder::SetupFrameBuffers( unsigned int w_, unsigned int h_ )
{
    int rtn_val = 0;
    /// Source frame: packed RGB24 as delivered by the caller.
    m_src_frame = av_frame_alloc();
    /// BUG FIX: was "if ( !m_src_frame < 0 )" — !ptr is bool 0/1, never < 0,
    /// so the NULL check was dead; and the old path returned 0 (success).
    if ( !m_src_frame )
    {
        PRINT_AV_ERROR( "ERROR::AvEncoder:Open:av_frame_alloc:" );
        return AVE_UNKNOW_LIB_ERROR;
    }
    m_src_frame->format = AV_PIX_FMT_RGB24;
    m_src_frame->width = w_;
    m_src_frame->height = h_;
    /// BUG FIX: av_image_alloc()'s align must be a power of two (was 24).
    /// align = 1 keeps linesize[0] == w_ * 3, which matters because Encode()
    /// later substitutes data[0] with the caller's tightly packed RGB buffer
    /// (assumes that buffer has no row padding — TODO confirm at the caller).
    rtn_val = av_image_alloc( m_src_frame->data, m_src_frame->linesize, m_src_frame->width, m_src_frame->height, AV_PIX_FMT_RGB24, 1 );
    if ( rtn_val < 0 )
    {
        PRINT_AV_ERROR( "ERROR::AvEncoder:Open:av_image_alloc:" );
        return rtn_val;
    }
    m_rgb_size = rtn_val; // total image buffer size in bytes
    /// Destination frame: the encoder's pixel format (YUV420P).
    m_dst_frame = av_frame_alloc();
    if ( !m_dst_frame )
    {
        PRINT_AV_ERROR( "ERROR::AvEncoder:Open:av_frame_alloc:" );
        return AVE_UNKNOW_LIB_ERROR;
    }
    m_dst_frame->format = m_av_codec_ctx->pix_fmt;
    m_dst_frame->width = w_;
    m_dst_frame->height = h_;
    rtn_val = av_image_alloc( m_dst_frame->data, m_dst_frame->linesize, m_dst_frame->width, m_dst_frame->height, m_av_codec_ctx->pix_fmt, 32 );
    if ( rtn_val < 0 )
    {
        PRINT_AV_ERROR( "ERROR::AvEncoder:Open:av_image_alloc:" );
        return rtn_val;
    }
    /// Conversion context RGB24 -> YUV420P at identical dimensions.
    m_sws_ctx = sws_getContext ( w_, h_, AV_PIX_FMT_RGB24,
                                 w_, h_, AV_PIX_FMT_YUV420P,
                                 SWS_BILINEAR, NULL,
                                 NULL, NULL ); /// SWS_BICUBIC
    /// BUG FIX: the old code tested the stale rtn_val here; sws_getContext()
    /// signals failure by returning NULL.
    if ( !m_sws_ctx )
    {
        PRINT_AV_ERROR( "ERROR::AvEncoder:Open:sws_getContext:" );
        return AVE_UNKNOW_LIB_ERROR;
    }
    return rtn_val;
} // END AvEncoder::SetupFrameBuffers()
/// Drain any frames still buffered in the encoder, write the container
/// trailer, and release every libav resource owned by this object.
/// NOTE(review): m_status normally holds AVE_* values, so AVERR_STR(m_status)
/// only yields a meaningful string once it becomes AVERROR_EOF — confirm.
void AvEncoder::FlushMode()
{
    int rtn_val;
    std::cout << " ::AvEncoder:FlushMode: checking the encoder status = " << AVERR_STR( m_status ) << std::endl;
    if ( m_status != AVERROR_EOF )
    {
        std::cout << " ::AvEncoder:FlushMode: Drain the encoder" << std::endl;
        /// A NULL frame switches the encoder into flush (drain) mode.
        rtn_val = avcodec_send_frame( m_av_codec_ctx, NULL );
        /// BUG FIX: the old test was "rtn_val == AVERROR(EAGAIN)", which
        /// contradicted the "NOT EOF && NOT EAGAIN" log text; report any
        /// error that is neither EOF nor EAGAIN.
        if ( ( rtn_val < 0 ) && ( rtn_val != AVERROR_EOF ) && ( rtn_val != AVERROR(EAGAIN) ) )
        {
            // In trouble at this point
            std::cerr << "ERROR::AvEncoder:FlushMode:avcodec_send_frame:" << AVERR_STR( rtn_val ) << std::endl;
            PRINT_AV_ERROR( "ERROR::AvEncoder:FlushMode:avcodec_send_frame: NOT EOF && NOT EAGAIN):" );
        }
        /// Read packets to EOF
        do
        {
            rtn_val = avcodec_receive_packet( m_av_codec_ctx, &m_av_packet );
            std::cerr << " ::AvEncoder:FlushMode:avcodec_receive_packet: " << AVERR_STR( rtn_val ) << std::endl;
            if ( rtn_val == AVERROR_EOF ) m_status = AVERROR_EOF;
            // Only write out on success
            if ( rtn_val == 0 )
            {
                /// Convert packet timestamps from codec to stream time_base.
                av_packet_rescale_ts( &m_av_packet, m_av_codec_ctx->time_base, m_av_out_fmt_ctx->streams[ 0 ]->time_base );
                rtn_val = av_interleaved_write_frame( m_av_out_fmt_ctx, &m_av_packet );
                if ( rtn_val < 0 )
                {
                    // This is BAD !!!
                    std::cerr << "ERROR::AvEncoder:FlushMode:av_interleaved_write_frame:" << AVERR_STR( rtn_val ) << std::endl;
                    PRINT_AV_ERROR( "ERROR::AvEncoder:FlushMode:av_interleaved_write_frame:" );
                    m_status = AVERROR_EOF; // In BIG trouble at this point, just give up
                }
            }
            else if ( ( rtn_val < 0 ) && ( rtn_val != AVERROR_EOF ) )
            {
                std::cerr << "ERROR::AvEncoder:FlushMode:av_interleaved_write_frame:" << AVERR_STR( rtn_val ) << std::endl;
                PRINT_AV_ERROR( "ERROR::AvEncoder:FlushMode:av_interleaved_write_frame: NOT EOF :" );
                m_status = AVERROR_EOF; // Give up at this point
            }
        } while ( m_status != AVERROR_EOF );
    } // if ( m_status != AVERROR_EOF )
    /// Close the output: av_write_trailer() is mandatory after a successful
    /// avformat_write_header().
    if ( m_av_out_fmt_ctx )
    {
        std::cerr << " ::AvEncoder:FlushMode:av_write_trailer: START :" << std::endl;
        rtn_val = av_write_trailer( m_av_out_fmt_ctx );
        if ( rtn_val != 0 )
        {
            std::cerr << "ERROR::AvEncoder:FlushMode:av_write_trailer:" << AVERR_STR( rtn_val ) << std::endl;
            PRINT_AV_ERROR( "ERROR::AvEncoder:FlushMode:av_interleaved_write_frame: NOT EOF :" );
        }
        std::cerr << " ::AvEncoder:FlushMode:av_write_trailer: DONE :" << std::endl;
    }
    ///
    /// \note DO NOT SKIP THIS STEP
    /// Close codec context before freeing the frames it is using
    ///
    if ( m_av_codec_ctx )
    {
        rtn_val = avcodec_close( m_av_codec_ctx );
        if ( rtn_val != 0 )
        {
            std::cerr << "ERROR::AvEncoder:FlushMode:avcodec_close:" << AVERR_STR( rtn_val ) << std::endl;
            PRINT_AV_ERROR( "ERROR::AvEncoder:FlushMode:avcodec_close" );
        }
        av_free( m_av_codec_ctx );
        m_av_codec_ctx = NULL; // BUG FIX: was left dangling after av_free()
        std::cout << " ::AvEncoder:FlushMode:avcodec_close:m_av_codec_ctx: CODEC CLOSED :" << std::endl;
    }
    /// NOTE(review): the av_image_alloc() buffers are deliberately NOT freed
    /// here: Encode() may have left m_dst_frame->data[] pointing mid-buffer
    /// after the vertical flip, and m_src_frame->data[0] aliases the caller's
    /// RGB buffer. This leaks the image buffers once per Open/Flush cycle —
    /// revisit by keeping the original base pointers around.
    if ( m_dst_frame )
    {
        av_frame_free( &m_dst_frame );
    }
    if ( m_src_frame )
    {
        av_frame_free( &m_src_frame );
    }
    if ( m_av_out_fmt_ctx )
    {
        /// BUG FIX: "m_av_out_fmt_ctx->pb = NULL" used to execute outside any
        /// null check and would dereference a NULL context.
        if ( !( m_av_out_fmt_ctx->oformat->flags & AVFMT_NOFILE ) && m_av_out_fmt_ctx->pb )
        {
            avio_close( m_av_out_fmt_ctx->pb );
        }
        m_av_out_fmt_ctx->pb = NULL;
        avformat_free_context(m_av_out_fmt_ctx);
        m_av_out_fmt_ctx = NULL;
    }
    if ( m_sws_ctx )
    {
        sws_freeContext( m_sws_ctx );
        m_sws_ctx = NULL;
    }
} // END void AvEncoder::FlushMode()
#if defined(USE_MAIN_DRIVER)
/// Stand-alone smoke-test driver: open an encoder on "enc.mp4" at 1024x768
/// using the class defaults and report whether Open() succeeded.
int main( int argc, char* argv[] )
{
    std::string fname( "enc.mp4" );
    (void) argc;
    (void) argv;
    AvEncoder ave;
    av_register_all(); // required before any other libavformat call (pre-4.0 FFmpeg)
    /// BUG FIX: the return value of Open() was silently discarded, so the
    /// driver always exited 0 even when the encoder failed to initialise.
    int rtn_val = ave.Open( fname, 1024, 768 );
    if ( rtn_val < 0 )
    {
        std::cerr << "ERROR: AvEncoder::Open failed, status = " << rtn_val << std::endl;
        return 1;
    }
    return 0;
}
#endif // #if defined(USE_MAIN_DRIVER)
#if ! defined(AVENCODER_H_INCLUDED_)
#define AVENCODER_H_INCLUDED_
#include <string>
#include <queue>
#include <functional>
#include <cstdint>
//#include <cstdatomic>
#include <mutex>
#include <pthread.h>
extern "C"
{
#define __STDC_CONSTANT_MACROS
#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
};
/// Threaded H.264 (nvenc) encoder: the producer calls Open() then
/// SendNewFrame() with packed RGB24 buffers; Run() executes on a worker
/// thread, converting, encoding and muxing frames until Close().
/// NOTE(review): m_running and m_status are shared between threads without
/// atomics — appears tolerated by design, but confirm on the target platform.
class AvEncoder
{
public :
    /// Encoder loop states published via EncStatus() / SendNewFrame().
    static const int AVE_IDLE_STATUS = 0;
    static const int AVE_BUSY_STATUS = 1;
    static const int AVE_FLUSH_STATUS = 2;
    static const int AVE_EOF_STATUS = 3;
    static const enum AVCodecID DEFAULT_CODEC = AV_CODEC_ID_H264;
    static const enum AVPixelFormat DEFAULT_PIX_FMT = AV_PIX_FMT_YUV420P;
    static const int DEFAULT_FPS = 60;
    static const int DEFAULT_GOP = 10; // one intra frame per 10 frames (~167 ms at 60 fps)
    /// BUG FIX: was "6e6" — a double is not a valid in-class initializer for
    /// a static const int; spell the 6 Mbit/s default as an integer literal.
    static const int DEFAULT_BPS = 6000000; // 6 Mbits/s
    static const int AVE_UNKNOW_LIB_ERROR = -1;
    AvEncoder()
    :
    m_av_encode_codec( NULL ),
    m_av_codec_ctx( NULL ),
    /// Output
    m_av_out_fmt_ctx( NULL ),
    m_src_frame( NULL ),
    m_dst_frame( NULL ),
    m_sws_ctx( NULL ),
    m_stream_idx( 0 ),
    m_frame_count( 0 ),
    m_rgb_size( 0 ),
    m_video_time( 0.0 ),
    m_status( AVE_IDLE_STATUS ),
    m_running( false )
    {
        /// Nothing to do — all libav state is created lazily in Open().
    }
    /// Configure the encoder and output file; must precede Run().
    int Open( std::string fname, unsigned int w_, unsigned int h_,
              /// optional
              enum AVCodecID = DEFAULT_CODEC, int fps = DEFAULT_FPS, int gop = DEFAULT_GOP, int bps = DEFAULT_BPS );
    /// Queue one RGB24 frame (buffer stays caller-owned until encoded).
    int SendNewFrame( uint8_t * RGBFrame );
    /// Last status (AVE_*) published by the encoder thread.
    int EncStatus() { return m_status; }
    /// Worker-thread body: encode queued frames until Close(), then flush.
    void Run();
    /// Ask the worker loop to stop.
    /// NOTE(review): the fixed 1 s sleep only *probably* outlasts the loop's
    /// final iteration — joining the thread would be deterministic.
    void Close()
    {
        m_running = false;
        sleep( 1 );
    }
    //
    ~AvEncoder()
    {
        /// Teardown happens in FlushMode(), invoked from Run().
    }
protected :
    AVCodec * m_av_encode_codec;          // encoder implementation (not owned)
    AVCodecContext * m_av_codec_ctx;      // owned; released in FlushMode()
    /// Output
    AVFormatContext * m_av_out_fmt_ctx;   // owned muxer context
    AVFrame * m_src_frame;                // RGB24 input frame
    AVFrame * m_dst_frame;                // YUV420P frame fed to the encoder
    SwsContext * m_sws_ctx;               // RGB24 -> YUV420P converter
    unsigned int m_stream_idx;
    unsigned int m_frame_count;           // frames submitted to the encoder
    int m_rgb_size;                       // RGB image buffer size in bytes
    double m_video_time;
    int m_status;                         // AVE_* (or AVERROR_EOF once flushed)
    bool m_running;                       // worker loop keep-alive flag
    AVPacket m_av_packet;                 // reused output packet
    std::queue<uint8_t*> m_frame_queue;   // producer -> encoder handoff
    std::mutex m_frame_queue_mtx;         // guards m_frame_queue
    ///
    /// Both return status used internally
    ///
    int Encode( uint8_t * RGBFrame );
    int SetupFrameBuffers( unsigned int w_, unsigned int h_ );
    /// Append a frame pointer to the queue (thread-safe).
    void PushFrame( uint8_t * f_ )
    {
        // scoped lock
        std::lock_guard<std::mutex> lock(m_frame_queue_mtx);
        m_frame_queue.push( f_ );
    }
    /// Remove and return the oldest queued frame (thread-safe).
    /// @return the frame pointer, or NULL when the queue is empty.
    uint8_t * PopFrame()
    {
        uint8_t * f_;
        // scoped lock
        std::lock_guard<std::mutex> lock(m_frame_queue_mtx);
        /// BUG FIX: front()/pop() on an empty queue is undefined behavior;
        /// guard it so racing callers get NULL instead.
        if ( m_frame_queue.empty() ) return NULL;
        f_ = m_frame_queue.front();
        m_frame_queue.pop();
        return f_;
    }
    /// Drain the encoder, finalize the file, free all libav resources.
    void FlushMode();
}; // END class AvEncoder
#endif // #if ! defined(AVENCODER_H_INCLUDED_)
@LinuxwitChdoCtOr
Copy link
Author

#include "AvEncoder.h"

std::string g_fname( "test.mp4" );

void * runEncoder(void * encoder)
{
( ( AvEncoder * ) encoder )->Run();
pthread_exit(NULL);
}

AvEncoder av_enc;
av_register_all();
av_enc.Open( g_fname, w, h );

pthread_t av_thread;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
if ( pthread_create( &av_thread, &attr, runEncoder, &av_enc ) ) exit( 1 );

av_enc.SendNewFrame( buffer );

g++ -g -std=c++0x -Wall -Wextra -DDEBUG -DGL_GLEXT_PROTOTYPES -I/usr/include/libdrm -I/usr/include/ffmpeg av_encode.cpp AvEncoder.cpp -Wl,-rpath=. libmyutil.so -lavformat -lavcodec -lavutil -lswscale -lavdevice -lGLEW -lGLU -lGL -lX11 -lm -lrt -o av_encode

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment