Using FFmpeg, decode an ordinary video to YUV and re-encode it to MJPEG, while decoding the audio to PCM and adjusting its parameters. Covers: initializing the encoder and decoder, configuring the encoder, encoding and decoding, and adjusting the frame rate.
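Below is a minimal usage sketch of the API declared in m2mjpeg_adpcm.h further down. The callback body, the LOGI macro and the numeric parameters are illustrative assumptions, not part of the gist:

#include <pthread.h>
#include <converter/m2mjpeg_adpcm.h>

/* Hypothetical status callback: the worker posts MJA_W_STATUS / MJA_W_ERROR here. */
static int on_mja_event(queue_t *msg_q, int what, int arg1, int arg2)
{
    (void)msg_q; (void)arg2;
    LOGI("mja event: what=%d arg1=%d", what, arg1); /* LOGI assumed from the project's log helpers */
    return 0;
}

/* Convert `path`: video is rescaled and re-encoded to MJPEG, audio to ADPCM. */
static int convert_file(const char *path, queue_t *msg_q)
{
    m2mjpeg_adpcm_t *mja = NULL;
    pthread_t tid;

    /* 320x240 @ 16 fps, ~1 Mbit/s, 32 kHz audio, 64-block video queue (illustrative values). */
    if (m2mjpeg_adpcm_init(path, 320, 240, 16, 1000000, 32000, 64,
                           msg_q, on_mja_event, &mja) < 0)
        return -1;

    /* The decode/convert loop runs on its own thread and fills mja->queue
     * (MJPEG frames) and mja->audio_queue (ADPCM packets). */
    pthread_create(&tid, NULL, m2mjpeg_adpcm_run, mja);

    /* ... consumer threads drain mja->queue / mja->audio_queue here ... */

    m2mjpeg_adpcm_stop(mja); /* asks the loop to exit and releases mja */
    pthread_join(tid, NULL);
    return 0;
}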
/******************************************************************************
* Copyright (C) 2015 All rights reserved.
*
* File: m2mjpeg_adpcm.c
* Author: Zhaosheng Qiu <juson163@yeah.net>
* Time: 2015/06/29
* Descriptions:
*
******************************************************************************/
#include <stdio.h>
#include <converter/m2mjpeg_adpcm.h>
#include <android/log.h>
#include <data.h>
#include <adpcm.h>
#include <toy_config.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#define MJA_DEBUG 1
//extern JavaVM *toy_app_vm;
#define DEBUG_AUDIO_DECODE_FILE 0
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
#define AUDIO_RINGBUFFER_SIZE 1024*1024*4 /* 4 MB */
#if DEBUG_AUDIO_DECODE_FILE
FILE *debug_fp = NULL;
#endif
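/*
 * init_audio: configure a libswresample context that converts the decoder's
 * native audio (c->sample_rate / c->sample_fmt / c->channels) to the caller's
 * fixed layout, rate and sample format.  On success it also returns the
 * per-frame output sample count (frame_size rescaled to the new rate) and
 * the output channel count.
 */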
static int init_audio(AVCodecContext *c, int64_t layout,
int sample_rate, enum AVSampleFormat sample_fmt,
struct SwrContext **swr, int *nb_samples,
int *nb_channels
){
struct SwrContext *swr_ctx = swr_alloc();
if (!swr_ctx){
LOGE("ERROR:Cound alloc audio swr\n");
return -1;
}
av_opt_set_int(swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",c->sample_fmt, 0);
av_opt_set_int(swr_ctx, "out_channel_layout", layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", sample_fmt, 0);
av_opt_set_int(swr_ctx, "in_channel_layout",
av_get_default_channel_layout(c->channels), 0);
if (swr_init(swr_ctx) < 0){
LOGE("ERROR:Count not init audio swr\n");
swr_free(swr_ctx);
return -1;
}
*swr = swr_ctx;
*nb_samples = av_rescale_rnd(c->frame_size, sample_rate, c->sample_rate, AV_ROUND_UP);
*nb_channels = av_get_channel_layout_nb_channels(layout);
LOGI("nb_samples:%d, nb_channels:%d", *nb_samples, *nb_channels);
return 0;
}
int m2mjpeg_adpcm_init(const char *filename,
int width,
int height,
int frames_per_sec,
int bitrate,
int samplerate,
int block_size,
queue_t *msg_q,
m2mjpeg_adpcm_cbfunc cb,
m2mjpeg_adpcm_t **out_m2mjpeg_adpcm){
AVFormatContext *pFormatCtx = NULL;
int i,videoStream, audioStream;
AVCodecContext *pCodecCtx[MJA_COUNT] ;
AVCodec *pCodec[MJA_COUNT];
AVStream *pStream[MJA_COUNT];
AVDictionary *optionsDict = NULL;
struct SwsContext *sws_ctx = NULL;
AVCodec *pEnCodec = NULL;
AVCodecContext *pEnCodecCtx = NULL;
m2mjpeg_adpcm_t *m2mjpeg_adpcm = NULL;
m2mjpeg_adpcm = (m2mjpeg_adpcm_t *)av_malloc(sizeof(m2mjpeg_adpcm_t));
if (m2mjpeg_adpcm == NULL)
return -1;
m2mjpeg_adpcm->queue = queue_create_limited(block_size);
if (m2mjpeg_adpcm->queue == NULL){
LOGE("ERROR:Create queue %d blocks failed", block_size);
return -1;
}
m2mjpeg_adpcm->audio_queue = queue_create_limited(3000);
if (m2mjpeg_adpcm->audio_queue == NULL){
LOGE("ERROR:Create audio_queue %d blocks failed", 3000);
return -1;
}
pthread_mutex_init(&m2mjpeg_adpcm->mutex, NULL);
pthread_cond_init(&m2mjpeg_adpcm->cond, NULL);
m2mjpeg_adpcm->ctl = MJA_STOP;
av_register_all();
LOGI("avformat_open file \"%s\"", filename);
if(avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0 )
{
LOGE("ERROR:avformat_open_input file: %s", filename);
return -1;
}
m2mjpeg_adpcm->pFormatCtx = pFormatCtx;
if(avformat_find_stream_info(m2mjpeg_adpcm->pFormatCtx, NULL)<0){
LOGE(" Couldn't find stream information");
return -1;
}
// av_dump_format(m2mjpeg_adpcm->pFormatCtx, 0, filename, 0);
// Find the first video stream and audio stream
videoStream = -1;
audioStream = -1;
int found = 0;
for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
pStream[MJA_VIDEO] = pFormatCtx->streams[i];
}else if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
audioStream = i;
}
}
if(videoStream==-1){
LOGE("Didn't find a video stream");
return -1;
}
if(audioStream==-1){
LOGE("Didn't find a audio stream");
return -1;
}
m2mjpeg_adpcm->vdid = videoStream;
m2mjpeg_adpcm->auid = audioStream;
m2mjpeg_adpcm->fps = frames_per_sec;
m2mjpeg_adpcm->src_fps = av_q2d(pStream[MJA_VIDEO]->avg_frame_rate);
m2mjpeg_adpcm->time_base = &(pStream[MJA_VIDEO]->time_base);
m2mjpeg_adpcm->duration = pFormatCtx->duration;
LOGI("Framerate %f/%f\n", m2mjpeg_adpcm->fps, m2mjpeg_adpcm->src_fps);
// Get a pointer to the codec context for the video stream
pCodecCtx[MJA_VIDEO]=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec[MJA_VIDEO]=avcodec_find_decoder(
pCodecCtx[MJA_VIDEO]->codec_id);
if(pCodec[MJA_VIDEO]==NULL) {
LOGE("Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx[MJA_VIDEO],
pCodec[MJA_VIDEO], &optionsDict)<0){
LOGE("Could not open codec");
return -1;
}
m2mjpeg_adpcm->pCodecCtx[MJA_VIDEO] = pCodecCtx[MJA_VIDEO];
// Allocate video frame
m2mjpeg_adpcm->pDecodeFrame = av_frame_alloc();
if (m2mjpeg_adpcm->pDecodeFrame == NULL){
LOGE("Error Allocate an AVFrame structure");
return -1;
}
m2mjpeg_adpcm->pSwsFrame = av_frame_alloc();
if(m2mjpeg_adpcm->pSwsFrame == NULL){
LOGE("Error Allocate an AVFrame structure");
return -1;
}
LOGI("AVFrame Format pit %dx%d", width, height);
m2mjpeg_adpcm->width = width;
m2mjpeg_adpcm->height = height;
// Determine required buffer size and allocate buffer
int numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P,
pCodecCtx[MJA_VIDEO]->width,
pCodecCtx[MJA_VIDEO]->height);
uint8_t *buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
m2mjpeg_adpcm->sws_ctx =
sws_getContext
(
pCodecCtx[MJA_VIDEO]->width,
pCodecCtx[MJA_VIDEO]->height,
pCodecCtx[MJA_VIDEO]->pix_fmt,
width,
height,
AV_PIX_FMT_YUVJ420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Assign appropriate parts of buffer to image planes in AVPicture
avpicture_fill((AVPicture *)m2mjpeg_adpcm->pSwsFrame,
buffer, AV_PIX_FMT_YUVJ420P,
width, height);
pStream[MJA_AUDIO] = pFormatCtx->streams[audioStream];
m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO] = pCodecCtx[MJA_AUDIO] = pStream[MJA_AUDIO]->codec;
pCodec[MJA_AUDIO] = avcodec_find_decoder(
m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO]->codec_id) ;
if(pCodec[MJA_AUDIO] != NULL){
if(avcodec_open2(m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO],
pCodec[MJA_AUDIO], NULL)<0){
LOGE("Could not open codec");
return -1;
}
}else{
LOGE("NO AUDIO DATA");
return -1;
}
LOGI("SERCH AV_CODEC_ID_MJPEG...");
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
if (codec == NULL){
LOGE("ERROR:EnCoder Codec not found");
return -1;
}
LOGI("AV_CODEC_ID_MJPEG Found");
m2mjpeg_adpcm->pEnCodec = codec;
AVCodecContext *ctx = avcodec_alloc_context3(m2mjpeg_adpcm->pEnCodec);
if (!ctx){
LOGE("ERROR: alloc encode context failed");
return -1;
}
ctx->bit_rate = bitrate;
ctx->width = width;
ctx->height = height;
ctx->time_base = (AVRational ){1, 16};
ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
m2mjpeg_adpcm->pSwsFrame->format = ctx->pix_fmt;
m2mjpeg_adpcm->pSwsFrame->width = width;
m2mjpeg_adpcm->pSwsFrame->height = height;
if (avcodec_open2(ctx, m2mjpeg_adpcm->pEnCodec, NULL) < 0) {
LOGE("ERROR: Could not open codec");
return -1;
}
m2mjpeg_adpcm->pEnCodecCtx = ctx;
int ret = av_image_alloc(m2mjpeg_adpcm->pSwsFrame->data,
m2mjpeg_adpcm->pSwsFrame->linesize,
ctx->width, ctx->height,ctx->pix_fmt, 32);
if (ret < 0){
LOGE("ERROR:Could not Alloc Image");
return -1;
}
m2mjpeg_adpcm->samplerate = 32000;
m2mjpeg_adpcm->sample_fmt = AV_SAMPLE_FMT_S16;
m2mjpeg_adpcm->ch_layout = AV_CH_LAYOUT_STEREO;
if(init_audio(pCodecCtx[MJA_AUDIO], m2mjpeg_adpcm->ch_layout,
m2mjpeg_adpcm->samplerate, m2mjpeg_adpcm->sample_fmt,
&m2mjpeg_adpcm->audio_swr_ctx, &m2mjpeg_adpcm->nb_samples,
&m2mjpeg_adpcm->nb_channels
) < 0){
LOGE("ERROR:INIT AUDIO FAILED\n");
return -1;
}
if(av_samples_alloc_array_and_samples(&m2mjpeg_adpcm->abuf, &m2mjpeg_adpcm->linesize,
m2mjpeg_adpcm->nb_channels,m2mjpeg_adpcm->nb_samples,
m2mjpeg_adpcm->sample_fmt, 0) < 0 ){
LOGE("Could not allocate destination samples\n");
return -1;
}
LOGI("dst_linesize:%d, dst_nb_channels:%d, dst_nb_samples:%d\n",
m2mjpeg_adpcm->linesize, m2mjpeg_adpcm->nb_channels, m2mjpeg_adpcm->nb_samples);
#if 0
ringbuffer_t *rb = rb_new(AUDIO_RINGBUFFER_SIZE);
if (rb == NULL){
LOGE("Init ringbuffer failed in decoder");
return -1;
}
m2mjpeg_adpcm->audio_rb = rb;
pthread_mutex_init(&m2mjpeg_adpcm->rb_mutex, NULL);
pthread_cond_init(&m2mjpeg_adpcm->rb_cond, NULL);
#endif
m2mjpeg_adpcm->msg_q = msg_q;
m2mjpeg_adpcm->cb = cb;
*out_m2mjpeg_adpcm = m2mjpeg_adpcm;
//TODO free if error
return 0;
}
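/*
 * rb_push_audio_data: block until the ring buffer has room for `size` bytes,
 * then write them and wake any waiting reader.  (Only used by the currently
 * disabled ring-buffer audio path below.)
 */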
static int rb_push_audio_data(ringbuffer_t *rb,
pthread_mutex_t *mutex,
pthread_cond_t *cond,
char *data,
size_t size){
size_t rb_can = 0;
int ret = 0;
pthread_mutex_lock(mutex);
while (1){
if((rb_can = rb_can_write(rb)) >= size){
ret = rb_write(rb, data, size);
pthread_cond_signal(cond);
break;
}
pthread_cond_wait(cond, mutex);
}
pthread_mutex_unlock(mutex);
return ret;
}
int m2mjpeg_adpcm_read_audio_data(m2mjpeg_adpcm_t *mja, uint8_t *data, size_t size){
ringbuffer_t *rb = mja->audio_rb;
int ret = 0;
pthread_mutex_lock(&mja->rb_mutex);
while (1){
if(rb_can_read(rb) >= size){
ret = rb_read(rb, data, size);
pthread_cond_signal(&mja->rb_cond);
break;
}
pthread_cond_wait(&mja->rb_cond, &mja->rb_mutex);
}
pthread_mutex_unlock(&mja->rb_mutex);
return ret;
}
size_t m2mjpeg_adpcm_get_audio_header_len(m2mjpeg_adpcm_t *mja){
return sizeof(adpcm_header_t);
}
int m2mjpeg_adpcm_read_audio_data_with_header(m2mjpeg_adpcm_t *mja, uint8_t *data, size_t size){
int ret = 0;
ringbuffer_t *rb = mja->audio_rb;
int mlen = sizeof(adpcm_header_t);
adpcm_header_t head = {
.flag = ZZWC,
.frame_len = size-mlen,
};
pthread_mutex_lock(&mja->rb_mutex);
while (1){
if(rb_can_read(rb) >= size-mlen){
memcpy(data, &head, mlen);
ret = rb_read(rb, data+mlen, size-mlen); /* payload only; the header was copied above */
pthread_cond_signal(&mja->rb_cond);
break;
}
pthread_cond_wait(&mja->rb_cond, &mja->rb_mutex);
}
pthread_mutex_unlock(&mja->rb_mutex);
return ret;
}
int m2mjpeg_adpcm_get_silen_voice(data_buffer_t **db){
int mlen = sizeof(adpcm_header_t);
int pkg_size = mlen+ 2000;
adpcm_header_t head = {
.flag = ZZWC,
.frame_len = 2000,
};
data_buffer_t *pbuf = (data_buffer_t *)av_malloc(sizeof(data_buffer_t));
if (pbuf == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}else{
memset(pbuf, 0, sizeof(data_buffer_t));
pbuf->data = (uint8_t*) av_malloc(pkg_size+mlen);
if (pbuf->data == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
memset(pbuf->data, 0, pkg_size+mlen);
memcpy(pbuf->data, &head, mlen);
pbuf->size = pkg_size;
*db = pbuf;
}
return 0;
}
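/*
 * custom_push_audio_data: repacketize the ADPCM byte stream into fixed
 * 2000-byte payloads, each prefixed with an adpcm_header_t (ZZWC tag).
 * A partially filled packet is carried over in *abuf between calls; every
 * completed packet is pushed onto the queue.
 */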
static int custom_push_audio_data(queue_t *q,
uint8_t *indata,
size_t insize,
data_buffer_t **abuf){
int mlen = sizeof(adpcm_header_t);
int pkg_size = mlen+ 2000;
uint8_t *data =indata;
int size = insize;
int rsize = 0;
adpcm_header_t head = {
.flag = ZZWC,
.frame_len = 2000,
};
data_buffer_t *pbuf = *abuf;
NEXT:
if (pbuf == NULL){
pbuf = (data_buffer_t *)av_malloc(sizeof(data_buffer_t));
if (pbuf == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}else{
memset(pbuf, 0, sizeof(data_buffer_t));
pbuf->data = (uint8_t*) av_malloc(pkg_size+mlen);
if (pbuf->data == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
memcpy(pbuf->data, &head, mlen);
pbuf->size = mlen;
}
}
rsize = pkg_size - pbuf->size; /* need size */
if (size > rsize){
memcpy(pbuf->data+pbuf->size, data , rsize);
pbuf->size += rsize;
queue_put_wait(q, (void *)pbuf);
pbuf = NULL;
data += rsize;
size -= rsize;
goto NEXT;
}else{
memcpy(pbuf->data+pbuf->size, data , size);
pbuf->size += size;
*abuf = pbuf;
return 0;
}
}
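/*
 * queue_push_audio_data / queue_push_video_data: wrap one complete frame in
 * its adpcm_header_t or mjpeg_header_t and enqueue it as a single
 * data_buffer_t; queue_put_wait() blocks when the queue is full.
 */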
static int queue_push_audio_data(queue_t *q,
uint8_t *data,
size_t size){
int mlen = sizeof(adpcm_header_t);
adpcm_header_t head = {
.flag = ZZWC,
.frame_len = size,
};
data_buffer_t *pbuf =
(data_buffer_t *)av_malloc(sizeof(data_buffer_t));
if (pbuf == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
pbuf->data = (uint8_t*) av_malloc(size+mlen);
if (pbuf->data == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
memcpy(pbuf->data, &head, mlen );
memcpy(pbuf->data+mlen, data, size);
pbuf->size = size+mlen;
#if 0
//DEBUG_AUDIO_DECODE_FILE
fwrite(pbuf->data, 1, pbuf->size, debug_fp);
m2mjpeg_adpcm_buffer_free(pbuf);
return 0;
#endif
return queue_put_wait(q, (void *)pbuf);
}
static int queue_push_video_data(queue_t *q,
uint8_t *data,
size_t size,
int64_t pts){
int mlen = sizeof(mjpeg_header_t);
mjpeg_header_t head = {
.flag = ZZDC,
.frame_len = size,
};
data_buffer_t *pbuf =
(data_buffer_t *)av_malloc(sizeof(data_buffer_t));
if (pbuf == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
pbuf->data = (uint8_t*) av_malloc(size+mlen);
if (pbuf->data == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
memcpy(pbuf->data, &head, mlen );
memcpy(pbuf->data+mlen, data, size);
pbuf->size = size+mlen;
pbuf->pts = pts;
#if 0
//DEBUG_AUDIO_DECODE_FILE
fwrite(pbuf->data, 1, pbuf->size, debug_fp);
m2mjpeg_adpcm_buffer_free(pbuf);
return 0;
#endif
return queue_put_wait(q, (void *)pbuf);
}
static int queue_push_data(queue_t *q,
uint8_t *data,
size_t size){
data_buffer_t *pbuf =
(data_buffer_t *)av_malloc(sizeof(data_buffer_t));
if (pbuf == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
pbuf->data = (uint8_t*) av_malloc(size);
if (pbuf->data == NULL){
LOGE("ERROR:Can't INIT data buffer");
return -1;
}
memcpy(pbuf->data, data, size);
pbuf->size = size;
return queue_put_wait(q, (void *)pbuf);
}
static void dump_file_header(mja_file_header_t *fr){
LOGI("MJA FILE HEADER:\n"
"audio_sample_rate:%d\n"
"video_frames:%d\n"
"video_frame_interval:%d\n"
"crc_32:%d\n", fr->audio_sample_rate,
fr->video_frames, fr->video_frame_interval, fr->crc_32);
unsigned char cfr[sizeof(mja_file_header_t)];
memcpy(cfr, fr, sizeof(mja_file_header_t));
LOGI("HEX:\n"
"%02X%02X %02X%02X \n"
"%02X%02X %02X%02X \n"
"%02X%02X %02X%02X \n"
"%02X%02X %02X%02X \n"
,cfr[0], cfr[1], cfr[2], cfr[3]
,cfr[4], cfr[5], cfr[6], cfr[7]
,cfr[8], cfr[9], cfr[10], cfr[11]
,cfr[12], cfr[13], cfr[14], cfr[15]
);
}
//ringbuffer_t *foo_rb = NULL;
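/*
 * m2mjpeg_adpcm_run: worker-thread body.  It first emits the mja_file_header_t,
 * then loops over demuxed packets: video frames are decoded, frame-rate
 * adjusted, scaled to the target size and re-encoded as MJPEG onto `queue`;
 * audio frames are decoded, resampled to 16-bit stereo PCM and ADPCM-compressed
 * onto `audio_queue`.  The ctl field (start/pause/stop) is checked on each pass.
 */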
void *m2mjpeg_adpcm_run(void *data) {
m2mjpeg_adpcm_t *pMja = (m2mjpeg_adpcm_t *) data;
JavaVM *vm = toy_app_vm;
AVPacket packet;
int frameFinished = 0;
AVFormatContext *pFormatCtx = pMja->pFormatCtx;
AVCodecContext *pCodecCtx[MJA_COUNT] ;
pCodecCtx[MJA_VIDEO] = pMja->pCodecCtx[MJA_VIDEO];
pCodecCtx[MJA_AUDIO] = pMja->pCodecCtx[MJA_AUDIO];
AVFrame *pFDec = pMja->pDecodeFrame;
AVFrame *pFSws = pMja->pSwsFrame;
struct SwsContext *sws_ctx = pMja->sws_ctx;
queue_t *queue = pMja->queue;
int videoStream = pMja->vdid;
int audioStream = pMja->auid;
int ret = 0, decoded =0;
adpcm_state state = {0}; /* ADPCM predictor state must start zeroed */
const char *thread_titile = "m2mjpeg_adpcm";
// JNIEnv *env;
// JavaVMAttachArgs thread_spec = {JNI_VERSION_1_4, thread_titile, NULL};
// jint res = (*vm)->AttachCurrentThread(vm, &env, &thread_spec);
// if (res || env == NULL){
// LOGE("ERROR:Cat't attach m2mjpeg_adpcm thread");
// return NULL;
// }
// LOGI("%s in", thread_titile);
uint8_t **abuf = pMja->abuf;
int abuf_size = 0;
int max_nb_samples = pMja->nb_samples;
AVPacket pkt;
av_init_packet(&pkt);
int got_encode_frame;
int result = 0;
#if DEBUG_AUDIO_DECODE_FILE
debug_fp = fopen("/sdcard/debug_media_adpcm.audio", "wb");
#endif
//m2mjpeg_adpcm_pause(pMja);
LOGI("m2mjpeg adpcm in");
double vfs = pMja->fps*pMja->duration/1000000;
double vfi = 1000000/pMja->fps;
double oldts = 1000000/pMja->src_fps;
double newts = vfi;
uint32_t oldcount = 0;
uint32_t newcount = 0;
uint8_t audio_buffer[3000];
uint32_t audio_buf_size = 0;
data_buffer_t *pbuf = NULL;
queue_t *msg_q = pMja->msg_q;
mjacontrol_t last_ctl = MJA_STOP;
mja_file_header_t file_header = {
.audio_sample_rate = pMja->samplerate,
.video_frames = (uint32_t)vfs,
.video_frame_interval = (uint32_t)vfi,
.crc_32 = 0,
};
file_header.crc_32 = crc32(&file_header, sizeof(file_header)-sizeof(uint32_t));
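/* crc_32 covers every preceding header field, i.e. sizeof(header) minus the
 * trailing crc_32 field itself. */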
#if MJA_DEBUG
//dump_file_header(&file_header);
#endif
// LOGI("m2mjpeg waiting for start");
// pthread_mutex_lock(&pMja->mutex);
// pthread_cond_wait(&pMja->cond, &pMja->mutex);
// pthread_mutex_unlock(&pMja->mutex);
// LOGI("m2mjpeg waiting for start...done");
sleep(1);
queue_push_data(queue, (uint8_t *)&file_header, sizeof(file_header));
pthread_mutex_lock(&pMja->mutex);
pMja->ctl = MJA_START;
pthread_mutex_unlock(&pMja->mutex);
LOGI("ffmpeg in");
while(av_read_frame(pFormatCtx, &packet)>=0) {
switch (pMja->ctl){
case MJA_STOP:
case MJA_WAITING:
goto OUT;
case MJA_START:
if (last_ctl != MJA_START){
pMja->cb(msg_q, MJA_W_STATUS, MJA_START, -1);/* reflect mediaplayer2 */
last_ctl = MJA_START;
}
break;
case MJA_PAUSE:
LOGI("m2mjpeg pause...");
last_ctl = MJA_PAUSE;
pMja->cb(msg_q, MJA_W_STATUS, MJA_PAUSE, -1);/* reflect mediaplayer2 */
pthread_mutex_lock(&pMja->mutex);
pthread_cond_wait(&pMja->cond, &pMja->mutex);
pthread_mutex_unlock(&pMja->mutex);
LOGI("m2mjpeg pause...out");
break;
default:
break;
}
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx[MJA_VIDEO],
pFDec, &frameFinished, &packet);
// Save the frame to disk
if (frameFinished)
{
double pts = av_frame_get_best_effort_timestamp(pFDec);
pts = av_rescale_q ( pts, *(pMja->time_base), AV_TIME_BASE_Q );
int percent = (int)(pts/pMja->duration *100);
// LOGI("pts: %f/%" PRId64 " = %d ", pts,
// pMja->duration , percent);
/* Frame-rate adjustment: drop this decoded frame while the source timeline
 * (oldcount*oldts) still lags the output timeline (newcount*newts), so the
 * output is decimated from src_fps down to the requested fps. */
oldcount++;
double old = oldcount*oldts;
double new = newcount*newts;
if (old < new){
goto PKT_FREE;
}
newcount++;
sws_scale(sws_ctx,
(uint8_t const * const *)pFDec->data,
pFDec->linesize,
0,
pCodecCtx[MJA_VIDEO]->height,
pFSws->data,
pFSws->linesize);
ret = avcodec_encode_video2(pMja->pEnCodecCtx, &pkt,
pFSws, &got_encode_frame);
// LOGI("vidieo:%5d ", pkt.size);
#if 1
if (ret ==0 && got_encode_frame){
if ( queue_push_video_data(queue, pkt.data,
pkt.size, percent) < 0){
LOGE("ERROR:Can not push data to queue");
break;
}
}
#endif
}
}else if (packet.stream_index == audioStream){
ret = avcodec_decode_audio4(pCodecCtx[MJA_AUDIO],
pFDec, &frameFinished, &packet);
if (ret < 0){
LOGE("ERROR:Decoding Audio Frame (%s)\n", av_err2str(ret));
break;
}
if (frameFinished){
#if 0
LOGI("#nb_samples:%d", pMja->nb_samples);
pMja->nb_samples = av_rescale_rnd(
swr_get_delay(pMja->audio_swr_ctx, pFDec->data) +pFDec->nb_samples,
pMja->samplerate,
pCodecCtx[MJA_AUDIO]->sample_rate,
AV_ROUND_UP);
LOGI("=====> %d, %d, %d, %d, %d",
pMja->nb_channels,
pMja->nb_samples,
pFDec->nb_samples,
pMja->samplerate,
pCodecCtx[MJA_AUDIO]->sample_rate);
if (pMja->nb_samples > max_nb_samples) {
av_freep(&abuf[0]);
LOGI("alloc samples: nb_channels = %d, nb_samples = %d", pMja->nb_channels, pMja->nb_samples);
ret = av_samples_alloc(abuf, &pMja->linesize, pMja->nb_channels,
pMja->nb_samples, pMja->sample_fmt, 1);
if (ret < 0){
LOGI("alloc samples buffer failed");
break;
}
max_nb_samples = pMja->nb_samples;
}
#endif
/* convert to destination format */
ret = swr_convert(pMja->audio_swr_ctx, abuf, pMja->nb_samples,
(const uint8_t **)pFDec->data, pFDec->nb_samples);
if (ret < 0) {
LOGE( "Error while converting\n");
break;
}
abuf_size = av_samples_get_buffer_size(&pMja->linesize, pMja->nb_channels,
ret, pMja->sample_fmt, 1);
if (abuf_size < 0) {
LOGE( "Could not get sample buffer size\n");
break;
}
// LOGI("audio:%5d/%5d ",ret, pFDec->nb_samples);
// fwrite(abuf[0], 1, abuf_size, debug_fp);
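/* The ADPCM coder packs each 16-bit sample into 4 bits: abuf_size bytes of
 * S16 PCM (abuf_size/2 samples) compress into abuf_size/4 output bytes. */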
unsigned char outs[abuf_size/4];
unsigned char *ins = (unsigned char *)abuf[0];
adpcm_coder((short*) ins,
(char*)outs, abuf_size/2, &state);
#if 0
trace();
if ((audio_buf_size = custom_push_audio_data(pMja->audio_queue, 2000,
audio_buffer,
audio_buf_size,
outs,
abuf_size/4) ) < 0){
LOGE("never here");
break;
}
trace();
#if 0
if (rb_push_audio_data(pMja->audio_rb,
&pMja->rb_mutex,
&pMja->rb_cond,
outs,
abuf_size/4) < 0){
LOGE("ERROR:push audio failed");
break;
}
fwrite(outs, 1, abuf_size/4, debug_fp);
#endif
#else
if ( custom_push_audio_data(pMja->audio_queue,
outs,
abuf_size/4,
&pbuf
) < 0){
LOGE("ERROR:Can not push data to queue");
break;
}
#endif
} //if
} //else
PKT_FREE:
av_free_packet(&pkt);
av_free_packet(&packet);
}//while
// TODO av_free(au_convert_ctx);
OUT:
#if DEBUG_AUDIO_DECODE_FILE
fclose(debug_fp);
#endif
// m2mjpeg_adpcm_stop(pMja);
pthread_mutex_lock(&pMja->mutex);
pMja->ctl = MJA_STOP;
pthread_mutex_unlock(&pMja->mutex);
pMja->cb(msg_q, MJA_W_STATUS, MJA_STOP, -1);/* reflect mediaplayer2 */
LOGI("%s out", thread_titile);
return NULL;
}
void m2mjpeg_adpcm_buffer_free(data_buffer_t *db){
if (db != NULL){
if (db->data != NULL)
av_free(db->data);
av_free(db);
}
}
void m2mjpeg_adpcm_pause(m2mjpeg_adpcm_t *mja){
pthread_mutex_lock(&mja->mutex);
mja->ctl = MJA_PAUSE;
LOGI("PAUSE M2Mjpeg");
pthread_cond_signal(&mja->cond);
pthread_mutex_unlock(&mja->mutex);
}
void m2mjpeg_adpcm_play(m2mjpeg_adpcm_t *mja){
pthread_mutex_lock(&mja->mutex);
mja->ctl = MJA_START;
pthread_cond_broadcast(&mja->cond);
pthread_mutex_unlock(&mja->mutex);
}
static int m2mjpeg_adpcm_free(m2mjpeg_adpcm_t *mja){
av_freep(&mja->pSwsFrame->data[0]); /* image buffer from av_image_alloc() */
av_frame_free(&mja->pSwsFrame);
av_frame_free(&mja->pDecodeFrame);
swr_free(&mja->audio_swr_ctx);
if (mja->abuf)
av_freep(&mja->abuf[0]);
av_freep(&mja->abuf);
sws_freeContext(mja->sws_ctx);
avcodec_close(mja->pCodecCtx[MJA_VIDEO]);
avcodec_close(mja->pCodecCtx[MJA_AUDIO]);
avcodec_close(mja->pEnCodecCtx);
avformat_close_input(&mja->pFormatCtx);
queue_destroy_complete(mja->queue, m2mjpeg_adpcm_buffer_free);
queue_destroy_complete(mja->audio_queue, m2mjpeg_adpcm_buffer_free);
// rb_free(mja->audio_rb);
pthread_mutex_destroy(&mja->mutex);
pthread_cond_destroy(&mja->cond);
// pthread_cond_destroy(&mja->rb_cond);
// pthread_mutex_destroy(&mja->rb_mutex);
mja->cb(mja->msg_q, MJA_W_ERROR, 0, 0);/* free done */
// av_free(mja);
return 0;
}
static void *m2mjpeg_asyn_free(void *param){
m2mjpeg_adpcm_t *mja = (m2mjpeg_adpcm_t *)param;
LOGI("m2mjpeg free...");
while(mja->ctl != MJA_STOP){
sleep(2);
break;
}
m2mjpeg_adpcm_free(mja);
LOGI("m2mjpeg free...done");
}
int m2mjpeg_adpcm_release(m2mjpeg_adpcm_t *mja){
if (mja == NULL)
return -2;
pthread_t thread;
if (0 != pthread_create(&thread, NULL, m2mjpeg_asyn_free, (void *)mja) ){
return -1;
}
return 0;
}
void m2mjpeg_adpcm_stop(m2mjpeg_adpcm_t *mja){
if (mja == NULL || mja->ctl == MJA_STOP)
return;
pthread_mutex_lock(&mja->mutex);
if (mja->ctl != MJA_STOP)
mja->ctl = MJA_WAITING;
pthread_cond_broadcast(&mja->cond);
pthread_mutex_unlock(&mja->mutex);
queue_t *p = mja->audio_queue;
pthread_mutex_lock(p->mutex);
pthread_cond_broadcast(p->cond_put);
pthread_mutex_unlock(p->mutex);
queue_t *q = mja->queue;
pthread_mutex_lock(q->mutex);
pthread_cond_broadcast(q->cond_put);
pthread_mutex_unlock(q->mutex);
m2mjpeg_asyn_free(mja);
// m2mjpeg_adpcm_release(mja);
}
/******************************************************************************
* Copyright (C) 2015 All rights reserved.
*
* File: m2mjpeg_adpcm.h
* Author: Zhaosheng Qiu <juson163@yeah.net>
* Time: 2015/06/29
* Descriptions:
*
******************************************************************************/
#ifndef __M2MJPEG_ADPCM_H__
#define __M2MJPEG_ADPCM_H__
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <queue.h>
#include <stdint.h>
#include <inttypes.h>
#include <ringbuffer.h>
#include <pthread.h>
#include <semaphore.h>
#include <data.h>
#define ZZDC 0x63643030
#define ZZWC 0x62773130
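/* Frame tags written ahead of every payload: ZZDC marks an MJPEG video frame,
 * ZZWC an ADPCM audio frame.  (On little-endian hosts the bytes read as the
 * AVI-style chunk ids "00dc" and "01wb".) */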
typedef struct _file_header{
uint32_t audio_sample_rate;
uint32_t video_frames;
uint32_t video_frame_interval;
uint32_t crc_32;
}mja_file_header_t;
typedef enum _mjatype{
MJA_VIDEO=0,
MJA_AUDIO,
MJA_COUNT,
}mjatype_t;
typedef enum _mjacontrol{
MJA_STOP =0,
MJA_START,
MJA_PAUSE,
MJA_WAITING,
}mjacontrol_t ;
/* warning: these values mirror mediaplayer2's message codes */
typedef enum _mjawhat{
MJA_W_STATUS=10,
MJA_W_ERROR,
}mjawhat_t;
typedef struct _mjpeg_header{
uint32_t flag;
uint32_t frame_len;
// uint32_t pre_frame_len;
}mjpeg_header_t;
typedef struct _adpcm_header{
uint32_t flag;
uint32_t frame_len;
}adpcm_header_t;
typedef int (*m2mjpeg_adpcm_cbfunc)(queue_t*, int , int, int);
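/*
 * Status callback invoked from the worker thread as cb(msg_q, what, arg1, arg2),
 * where `what` is MJA_W_STATUS or MJA_W_ERROR and arg1 carries the current
 * mjacontrol_t state (MJA_START / MJA_PAUSE / MJA_STOP).
 */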
typedef struct _m2mjpeg_adpcm_t{
int width;
int height;
double fps;
double src_fps;
int64_t duration;
AVRational *time_base;
uint32_t samplerate;
int linesize;
enum AVSampleFormat sample_fmt;
int64_t ch_layout;
struct SwrContext *audio_swr_ctx;
int nb_samples;
int nb_channels;
uint8_t **abuf;
int vdid ;
int auid;
AVFormatContext *pFormatCtx;
AVCodecContext *pCodecCtx[MJA_COUNT];
AVFrame *pDecodeFrame;
AVFrame *pSwsFrame;
AVCodec *pEnCodec;
AVCodecContext *pEnCodecCtx;
struct SwsContext *sws_ctx;
queue_t *queue;
queue_t *audio_queue;
ringbuffer_t *audio_rb;
pthread_mutex_t rb_mutex;
pthread_cond_t rb_cond;
mjacontrol_t ctl;
pthread_mutex_t mutex;
pthread_cond_t cond;
queue_t *msg_q;
m2mjpeg_adpcm_cbfunc cb;
}m2mjpeg_adpcm_t;
int m2mjpeg_adpcm_init(const char *filename,
int width,
int height,
int frames_per_sec,
int bitrate,
int samplerate,
int block_size,
queue_t *msg_q,
m2mjpeg_adpcm_cbfunc cb,
m2mjpeg_adpcm_t **out_m2mjpeg_adpcm);
void *m2mjpeg_adpcm_run(void *data) ;
void m2mjpeg_adpcm_buffer_free(data_buffer_t *db);
void m2mjpeg_adpcm_pause(m2mjpeg_adpcm_t *mja);
void m2mjpeg_adpcm_play(m2mjpeg_adpcm_t *mja);
void m2mjpeg_adpcm_stop(m2mjpeg_adpcm_t *mja);
int m2mjpeg_adpcm_release(m2mjpeg_adpcm_t *mja);
int m2mjpeg_adpcm_read_audio_data(m2mjpeg_adpcm_t *mja, uint8_t *data, size_t size);
size_t m2mjpeg_adpcm_get_audio_header_len(m2mjpeg_adpcm_t *mja);
int m2mjpeg_adpcm_read_audio_data_with_header(m2mjpeg_adpcm_t *mja, uint8_t *data, size_t size);
int m2mjpeg_adpcm_get_silen_voice(data_buffer_t **db);
#endif