|
#include <AudioConverterFfmpeg.h>

#include <cstdint>  /* int64_t (swr_get_delay() return type used in convert()). */
|
|
|
namespace poly { |
|
|
|
/* -------------------------------------------------------- */ |
|
|
|
static AVSampleFormat audiobitsize_to_avsampleformat(AudioBitSize bs, bool isInterleaved); |
|
static uint64_t audiomode_to_avchannellayout(AudioMode mode); |
|
static int audiomode_to_numchannels(AudioMode mode); |
|
|
|
/* -------------------------------------------------------- */ |
|
|
|
/*
  Constructs the converter in an unconfigured state. Every member is
  reset to its "not initialized" sentinel; init() must be called before
  convert() can be used.
*/
AudioConverterFfmpeg::AudioConverterFfmpeg() {

  listener             = NULL;
  ctx                  = NULL;
  src_rate             = -1;
  dst_rate             = -1;
  dst_chanlayout       = 0;
  dst_format           = AV_SAMPLE_FMT_NONE;
  dst_nchannels        = -1;
  dst_nbytes_allocated = 0;
  dst_linesize         = -1;
  dst_max_samples      = -1;
  dst_data             = NULL;
}
|
|
|
/*
  Destructor. Releases any resources still held: the original version
  left `ctx` and `dst_data` dangling if the user forgot to call
  shutdown(), leaking the SwrContext and the destination buffer.
  shutdown() checks every pointer before freeing, so an earlier explicit
  shutdown() makes this a harmless no-op.
*/
AudioConverterFfmpeg::~AudioConverterFfmpeg() {
  shutdown();
}
|
|
|
/*
  Configures the swresample context for a `from` -> `to` conversion.

  @param from  Source audio settings (samplerate, mode, bitsize, interleaving).
  @param to    Destination audio settings.
  @param lis   Listener for converted audio; must not be NULL.
  @return      0 on success, < 0 on error. On error the instance is
               shut down again and may be re-initialized later.
*/
int AudioConverterFfmpeg::init(AudioSettings from, AudioSettings to, AudioConverterListener* lis) {

  int r = 0;

  if (NULL == lis) {
    SX_ERROR("Listener is NULL.");
    r = -1;
    goto error;
  }

  /* Re-initializing would otherwise leak the previously allocated
     SwrContext and destination buffer; shutdown() is a no-op on a
     freshly constructed instance. */
  shutdown();

  convert_from = from;
  convert_to = to;
  listener = lis;

  src_rate = audio_samplerate_get_int(from.samplerate);
  dst_chanlayout = audiomode_to_avchannellayout(to.mode);
  dst_rate = audio_samplerate_get_int(to.samplerate);
  dst_format = audiobitsize_to_avsampleformat(to.bitsize, to.is_interleaved);

  /* Use this file's own mode -> channel-count helper (identical result
     for the supported mono/stereo modes) instead of routing through
     av_get_channel_layout_nb_channels(), which is deprecated and was
     removed in newer FFmpeg releases. */
  dst_nchannels = audiomode_to_numchannels(to.mode);

  ctx = swr_alloc();
  if (NULL == ctx) {
    SX_ERROR("Failed to allocate the resample context.");
    r = -2;
    goto error;
  }

  av_opt_set_int(ctx, "in_channel_layout", audiomode_to_avchannellayout(from.mode), 0);
  av_opt_set_int(ctx, "in_sample_rate", src_rate, 0);
  av_opt_set_sample_fmt(ctx, "in_sample_fmt", audiobitsize_to_avsampleformat(from.bitsize, from.is_interleaved), 0);

  av_opt_set_int(ctx, "out_channel_layout", dst_chanlayout, 0);
  av_opt_set_int(ctx, "out_sample_rate", dst_rate, 0);
  av_opt_set_sample_fmt(ctx, "out_sample_fmt", dst_format, 0);

  if ( (r = swr_init(ctx)) < 0) {
    SX_ERROR("Failed to initialize the resample context.");
    r = -3;
    goto error;
  }

error:

  /* Single cleanup path: on any failure roll the instance back to its
     unconfigured state. */
  if (r < 0) {
    shutdown();
  }

  return r;
}
|
|
|
int AudioConverterFfmpeg::shutdown() { |
|
|
|
int r = 0; |
|
|
|
if (NULL != ctx) { |
|
swr_free(&ctx); |
|
ctx = NULL; |
|
} |
|
|
|
if (NULL != dst_data) { |
|
free(dst_data); |
|
dst_data = NULL; |
|
} |
|
|
|
src_rate = -1; |
|
dst_rate = -1; |
|
dst_format = AV_SAMPLE_FMT_NONE; |
|
dst_nchannels = -1; |
|
dst_nbytes_allocated = -1; |
|
dst_linesize = -1; |
|
dst_max_samples = -1; |
|
|
|
return r; |
|
} |
|
|
|
int AudioConverterFfmpeg::convert(void* data, size_t nbytes, size_t nframes) { |
|
|
|
if (NULL == data) { |
|
SX_ERROR("data is NULL."); |
|
return -1; |
|
} |
|
|
|
if (0 == nbytes) { |
|
SX_ERROR("nbytes is 0."); |
|
return -2; |
|
} |
|
|
|
if (0 == nframes) { |
|
SX_ERROR("nframes is 0."); |
|
return -3; |
|
} |
|
|
|
if (NULL == ctx) { |
|
SX_ERROR("ctx is NULL."); |
|
return -4; |
|
} |
|
|
|
int r = 0; |
|
int delayed_samples = swr_get_delay(ctx, src_rate); |
|
int num_dst_samples = av_rescale_rnd(delayed_samples + nframes, dst_rate, src_rate, AV_ROUND_UP); |
|
int nbytes_needed = av_samples_get_buffer_size(NULL, dst_nchannels, nframes, dst_format, 1); |
|
|
|
/* Allocate or reallocate the destination buffer. */ |
|
if (nbytes_needed > dst_nbytes_allocated) { |
|
if (NULL == dst_data) { |
|
dst_data = (uint8_t*)malloc(nbytes_needed); |
|
if (NULL == dst_data) { |
|
SX_ERROR("Failed to allocate the destination buffer of %d bytes.", nbytes_needed); |
|
return -1; |
|
} |
|
} |
|
else { |
|
dst_data = (uint8_t*)realloc(dst_data, nbytes_needed); |
|
if (NULL == dst_data) { |
|
SX_ERROR("Failed to reallocate the destination buffer of %d bytes.", nbytes_needed); |
|
return -2; |
|
} |
|
} |
|
dst_nbytes_allocated = nbytes_needed; |
|
} |
|
|
|
r = swr_convert(ctx, |
|
&dst_data, |
|
num_dst_samples, |
|
(const uint8_t**)data, |
|
nframes |
|
); |
|
|
|
return r; |
|
} |
|
|
|
/* -------------------------------------------------------- */ |
|
|
|
/*
  Maps an AudioBitSize (plus the interleaving flag) onto the matching
  AVSampleFormat: planar formats when not interleaved, packed otherwise.
  An unhandled bit size is fatal, mirroring the other mapping helpers in
  this file.
*/
static AVSampleFormat audiobitsize_to_avsampleformat(AudioBitSize bs, bool isInterleaved) {

  switch (bs) {
    case AUDIO_BITSIZE_S16: { return isInterleaved ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_S16P; }
    case AUDIO_BITSIZE_S32: { return isInterleaved ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S32P; }
    case AUDIO_BITSIZE_F32: { return isInterleaved ? AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_FLTP; }
    default: {
      SX_ERROR("Unhandled AudioBitSize; cannot convert to AVSampleFormat. (exiting).");
      exit(EXIT_FAILURE);
    }
  }
}
|
|
|
/*
  Maps an AudioMode onto the corresponding FFmpeg channel-layout bitmask.
  Only mono and stereo are supported; anything else is fatal.
*/
static uint64_t audiomode_to_avchannellayout(AudioMode mode) {

  switch (mode) {
    case AUDIO_MODE_STEREO: { return AV_CH_LAYOUT_STEREO; }
    case AUDIO_MODE_MONO:   { return AV_CH_LAYOUT_MONO; }
    default: {
      SX_ERROR("Unhandled AudioMode; cannot convert to avchannellayout. (exiting).");
      exit(EXIT_FAILURE);
    }
  }
}
|
|
|
/*
  Maps an AudioMode onto its channel count (stereo -> 2, mono -> 1).
  Only mono and stereo are supported; anything else is fatal.
  The error message previously contained a doubled period ("channels..").
*/
static int audiomode_to_numchannels(AudioMode mode) {

  switch (mode) {
    case AUDIO_MODE_STEREO: { return 2; }
    case AUDIO_MODE_MONO:   { return 1; }
    default: {
      SX_ERROR("Unhandled AudioMode; cannot convert to number of channels. (exiting).");
      exit(EXIT_FAILURE);
    }
  }
}
|
|
|
/* -------------------------------------------------------- */ |
|
|
|
} /* namespace poly */ |