@icculus
Created August 30, 2023 17:26
Attempt to generalize SDL3 audio stream format management
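
For context, here is a minimal caller-side sketch of the pattern this patch is meant to support: queueing data under one source spec, changing the source spec, queueing more, then draining everything in the destination format. It is not part of the patch; the format constants, buffer contents, and 0/-1 return-value checks are assumptions based on the public SDL3 audio-stream API of this era, and the buffers are just placeholders.

// Hypothetical caller-side sketch (not part of the patch). With this change, the
// stream tracks which queued bytes arrived under which source spec, so data put
// before the SDL_SetAudioStreamFormat() call below is not reinterpreted.
#include <SDL3/SDL.h>

int queue_two_source_formats(SDL_AudioStream **out_stream)
{
    const SDL_AudioSpec dst  = { SDL_AUDIO_F32, 2, 48000 };  // what we'll read back
    const SDL_AudioSpec srcA = { SDL_AUDIO_S16, 2, 44100 };  // first source format
    const SDL_AudioSpec srcB = { SDL_AUDIO_F32, 1, 22050 };  // second source format

    SDL_AudioStream *stream = SDL_CreateAudioStream(&srcA, &dst);
    if (!stream) {
        return -1;
    }

    Sint16 bufA[4096] = { 0 };    // placeholder: S16 stereo sample frames
    float  bufB[2048] = { 0.0f }; // placeholder: F32 mono sample frames

    if (SDL_PutAudioStreamData(stream, bufA, (int) sizeof (bufA)) < 0) {
        SDL_DestroyAudioStream(stream);
        return -1;
    }

    // Change only the source spec (NULL keeps the current destination spec).
    // The bytes already queued above keep the spec they were put with.
    if (SDL_SetAudioStreamFormat(stream, &srcB, NULL) < 0) {
        SDL_DestroyAudioStream(stream);
        return -1;
    }

    if (SDL_PutAudioStreamData(stream, bufB, (int) sizeof (bufB)) < 0) {
        SDL_DestroyAudioStream(stream);
        return -1;
    }

    *out_stream = stream;  // caller drains it with SDL_GetAudioStreamData().
    return 0;
}

Previously, setting a new source spec immediately changed how already-queued bytes were interpreted; the chunk list introduced below defers that reinterpretation until the earlier data has actually been drained.
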
diff --git a/src/audio/SDL_audiocvt.c b/src/audio/SDL_audiocvt.c
index 5b3feb044..aac258b97 100644
--- a/src/audio/SDL_audiocvt.c
+++ b/src/audio/SDL_audiocvt.c
@@ -43,8 +43,9 @@ static Sint64 GetResampleRate(const int src_rate, const int dst_rate)
SDL_assert(src_rate > 0);
SDL_assert(dst_rate > 0);
- if (src_rate == dst_rate)
+ if (src_rate == dst_rate) {
return 0;
+ }
Sint64 sample_rate = ((Sint64)src_rate << 32) / (Sint64)dst_rate;
SDL_assert(sample_rate > 0);
@@ -272,7 +273,7 @@ void SDL_SetupAudioResampler()
}
static void ResampleAudio(const int chans, const float *inbuf, const int inframes, float *outbuf, const int outframes,
- const Sint64 resample_rate, Sint64* resample_offset)
+ const Sint64 resample_rate, Sint64* resample_offset)
{
SDL_assert(resample_rate > 0);
float *dst = outbuf;
@@ -639,19 +640,14 @@ static int SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *sr
const SDL_AudioFormat src_format = src_spec->format;
const int src_channels = src_spec->channels;
const int src_rate = src_spec->freq;
-
- const SDL_AudioFormat dst_format = dst_spec->format;
- const int dst_channels = dst_spec->channels;
const int dst_rate = dst_spec->freq;
const int src_sample_frame_size = (SDL_AUDIO_BITSIZE(src_format) / 8) * src_channels;
- const int dst_sample_frame_size = (SDL_AUDIO_BITSIZE(dst_format) / 8) * dst_channels;
-
const int prev_history_buffer_frames = stream->history_buffer_frames;
const Sint64 resample_rate = GetResampleRate(src_rate, dst_rate);
const int resampler_padding_frames = GetResamplerPaddingFrames(resample_rate);
const int history_buffer_frames = GetHistoryBufferSampleFrames(resampler_padding_frames);
- const int history_buffer_frame_size = CalculateMaxSampleFrameSize(stream->src_spec.format, stream->src_spec.channels, src_format, src_channels);
+ const int history_buffer_frame_size = CalculateMaxSampleFrameSize(stream->chunks.spec.format, stream->chunks.spec.channels, src_format, src_channels);
const size_t history_buffer_allocation = history_buffer_frames * history_buffer_frame_size;
Uint8 *history_buffer = stream->history_buffer;
@@ -672,12 +668,12 @@ static int SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *sr
if (stream->history_buffer) {
if (history_buffer_frames <= prev_history_buffer_frames) {
ConvertAudio(history_buffer_frames, stream->history_buffer,
- stream->src_spec.format, stream->src_spec.channels,
+ stream->chunks.spec.format, stream->chunks.spec.channels,
history_buffer,
src_format, src_channels, NULL);
} else {
ConvertAudio(prev_history_buffer_frames, stream->history_buffer,
- stream->src_spec.format, stream->src_spec.channels,
+ stream->chunks.spec.format, stream->chunks.spec.channels,
history_buffer + ((history_buffer_frames - prev_history_buffer_frames) * src_sample_frame_size),
src_format, src_channels, NULL);
SDL_memset(history_buffer, SDL_GetSilenceValueForFormat(src_format), (history_buffer_frames - prev_history_buffer_frames) * src_sample_frame_size); // silence oldest history samples.
@@ -694,18 +690,6 @@ static int SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *sr
stream->resampler_padding_frames = resampler_padding_frames;
stream->history_buffer_frames = history_buffer_frames;
- stream->src_sample_frame_size = src_sample_frame_size;
- stream->dst_sample_frame_size = dst_sample_frame_size;
- stream->max_sample_frame_size = CalculateMaxSampleFrameSize(src_format, src_channels, dst_format, dst_channels);
- stream->resample_rate = resample_rate;
-
- if (src_spec != &stream->src_spec) {
- SDL_memcpy(&stream->src_spec, src_spec, sizeof (SDL_AudioSpec));
- }
-
- if (dst_spec != &stream->dst_spec) {
- SDL_memcpy(&stream->dst_spec, dst_spec, sizeof (SDL_AudioSpec));
- }
return 0;
}
@@ -713,37 +697,36 @@ static int SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *sr
SDL_AudioStream *SDL_CreateAudioStream(const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec)
{
// !!! FIXME: fail if audio isn't initialized
+ if (src_spec) {
+ if (!SDL_IsSupportedChannelCount(src_spec->channels)) {
+ SDL_InvalidParamError("src_spec->channels");
+ return NULL;
+ } else if (!SDL_IsSupportedAudioFormat(src_spec->format)) {
+ SDL_InvalidParamError("src_spec->format");
+ return NULL;
+ } else if (src_spec->freq >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
+ SDL_SetError("Source rate is too high");
+ return NULL;
+ } else if (src_spec->freq <= 0) {
+ SDL_InvalidParamError("src_spec->freq");
+ return NULL;
+ }
+ }
- if (!src_spec) {
- SDL_InvalidParamError("src_spec");
- return NULL;
- } else if (!dst_spec) {
- SDL_InvalidParamError("dst_spec");
- return NULL;
- } else if (!SDL_IsSupportedChannelCount(src_spec->channels)) {
- SDL_InvalidParamError("src_spec->channels");
- return NULL;
- } else if (!SDL_IsSupportedChannelCount(dst_spec->channels)) {
- SDL_InvalidParamError("dst_spec->channels");
- return NULL;
- } else if (src_spec->freq <= 0) {
- SDL_InvalidParamError("src_spec->freq");
- return NULL;
- } else if (dst_spec->freq <= 0) {
- SDL_InvalidParamError("dst_spec->freq");
- return NULL;
- } else if (src_spec->freq >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
- SDL_SetError("Source rate is too high");
- return NULL;
- } else if (dst_spec->freq >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
- SDL_SetError("Destination rate is too high");
- return NULL;
- } else if (!SDL_IsSupportedAudioFormat(src_spec->format)) {
- SDL_InvalidParamError("src_spec->format");
- return NULL;
- } else if (!SDL_IsSupportedAudioFormat(dst_spec->format)) {
- SDL_InvalidParamError("dst_spec->format");
- return NULL;
+ if (dst_spec) {
+ if (!SDL_IsSupportedChannelCount(dst_spec->channels)) {
+ SDL_InvalidParamError("dst_spec->channels");
+ return NULL;
+ } else if (!SDL_IsSupportedAudioFormat(dst_spec->format)) {
+ SDL_InvalidParamError("dst_spec->format");
+ return NULL;
+ } else if (dst_spec->freq <= 0) {
+ SDL_InvalidParamError("dst_spec->freq");
+ return NULL;
+ } else if (dst_spec->freq >= SDL_MAX_SINT32 / RESAMPLER_SAMPLES_PER_ZERO_CROSSING) {
+ SDL_SetError("Destination rate is too high");
+ return NULL;
+ }
}
SDL_AudioStream *retval = (SDL_AudioStream *)SDL_calloc(1, sizeof(SDL_AudioStream));
@@ -767,13 +750,17 @@ SDL_AudioStream *SDL_CreateAudioStream(const SDL_AudioSpec *src_spec, const SDL_
SDL_SetupAudioResampler();
retval->packetlen = packetlen;
- SDL_memcpy(&retval->src_spec, src_spec, sizeof (SDL_AudioSpec));
- if (SetAudioStreamFormat(retval, src_spec, dst_spec) == -1) {
- SDL_DestroyAudioStream(retval);
- return NULL;
+ if (src_spec) {
+ SDL_copyp(&retval->src_spec, src_spec);
}
+ if (dst_spec) {
+ SDL_copyp(&retval->dst_spec, dst_spec);
+ }
+
+ retval->chunks_tail = &retval->chunks;
+
return retval;
}
@@ -858,39 +845,75 @@ int SDL_SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *src_s
}
SDL_LockMutex(stream->lock);
- const int retval = SetAudioStreamFormat(stream, src_spec ? src_spec : &stream->src_spec, dst_spec ? dst_spec : &stream->dst_spec);
+
+ if (src_spec) {
+ SDL_copyp(&stream->src_spec, src_spec);
+ }
+
+ if (dst_spec) {
+ SDL_copyp(&stream->dst_spec, dst_spec);
+ }
+
SDL_UnlockMutex(stream->lock);
- return retval;
+ return 0;
}
-int SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
-{
-#if DEBUG_AUDIOSTREAM
- SDL_Log("AUDIOSTREAM: wants to put %d preconverted bytes", len);
-#endif
+static int GetAudioStreamAvailable(SDL_AudioStream *stream, const SDL_AudioSpec *dst_spec);
- if (stream == NULL) {
- return SDL_InvalidParamError("stream");
- } else if (buf == NULL) {
- return SDL_InvalidParamError("buf");
- } else if (len == 0) {
- return 0; // nothing to do.
+static int PutAudioStreamData(SDL_AudioStream *stream, const SDL_AudioSpec *spec, const void *buf, int len)
+{
+ // top-level should have checked all these!
+ SDL_assert(stream != NULL);
+ SDL_assert(spec != NULL);
+ SDL_assert(spec->format != 0);
+ SDL_assert(buf != NULL);
+ SDL_assert(len > 0);
+
+ const int src_sample_frame_size = (SDL_AUDIO_BITSIZE(spec->format) / 8) * spec->channels;
+ if ((len % src_sample_frame_size) != 0) {
+ return SDL_SetError("Can't add partial sample frames");
}
SDL_LockMutex(stream->lock);
- const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;
+ SDL_AudioStreamChunk *current_chunk = stream->chunks_tail;
+ SDL_assert(current_chunk != NULL); // should at least be &stream->chunks.
+ const SDL_AudioSpec *current_spec = &current_chunk->spec;
- if ((len % stream->src_sample_frame_size) != 0) {
- SDL_UnlockMutex(stream->lock);
- return SDL_SetError("Can't add partial sample frames");
+ if ((current_spec->format != spec->format) || (current_spec->channels != spec->channels) || (current_spec->freq != spec->freq)) {
+ SDL_assert(current_chunk->next == NULL);
+
+ SDL_AudioStreamChunk *newchunk;
+ if (current_chunk->spec.format == 0) { // first put to empty stream...
+ SDL_assert(current_chunk == &stream->chunks);
+ newchunk = current_chunk;
+ } else {
+ newchunk = SDL_malloc(sizeof (SDL_AudioStreamChunk));
+ if (!newchunk) {
+ SDL_UnlockMutex(stream->lock);
+ return SDL_OutOfMemory();
+ }
+ stream->chunks_tail = current_chunk->next = newchunk;
+ current_chunk = newchunk;
+ }
+
+ SDL_copyp(&newchunk->spec, spec);
+ newchunk->num_bytes = 0;
+ newchunk->src_sample_frame_size = (SDL_AUDIO_BITSIZE(spec->format) / 8) * spec->channels;
+ current_spec = spec;
}
+ const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;
+
// just queue the data, we convert/resample when dequeueing.
const int retval = SDL_WriteToDataQueue(stream->queue, buf, len);
stream->flushed = SDL_FALSE;
+ if (retval == 0) {
+ current_chunk->num_bytes += len;
+ }
+
if (stream->put_callback) {
const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available;
if (newavail > 0) { // don't call the callback if we can't actually offer new data (still filling future buffer, only added 1 frame but downsampling needs more to produce new sound, etc).
@@ -903,6 +926,27 @@ int SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
return retval;
}
+int SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
+{
+#if DEBUG_AUDIOSTREAM
+ SDL_Log("AUDIOSTREAM: wants to put %d bytes", len);
+#endif
+
+ if (stream == NULL) {
+ return SDL_InvalidParamError("stream");
+ } else if (stream->src_spec.format == 0) {
+ return SDL_SetError("Audio stream's src format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ } else if (stream->dst_spec.format == 0) {
+ return SDL_SetError("Audio stream's dst format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ } else if (buf == NULL) {
+ return SDL_InvalidParamError("buf");
+ } else if (len == 0) {
+ return 0; // nothing to do.
+ }
+
+ return PutAudioStreamData(stream, &stream->src_spec, buf, len);
+}
+
int SDL_FlushAudioStream(SDL_AudioStream *stream)
{
@@ -941,7 +985,7 @@ static void UpdateStreamHistoryBuffer(SDL_AudioStream* stream, Uint8* input_buff
{
// Even if we aren't currently resampling, we always need to update the history buffer
Uint8 *history_buffer = stream->history_buffer;
- int history_bytes = stream->history_buffer_frames * stream->src_sample_frame_size;
+ int history_bytes = stream->history_buffer_frames * stream->chunks.src_sample_frame_size;
if (left_padding != NULL) {
// Fill in the left padding using the history buffer
@@ -959,21 +1003,24 @@ static void UpdateStreamHistoryBuffer(SDL_AudioStream* stream, Uint8* input_buff
}
}
+static size_t GetAudioStreamChunkFramesAvailable(const SDL_AudioStreamChunk *chunk, const SDL_AudioSpec *dst_spec, Sint64 resample_offset, size_t *_padding);
+
+
// You must hold stream->lock and validate your parameters before calling this!
-static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int len)
+static int GetAudioStreamDataInternal(SDL_AudioStream *stream, const SDL_AudioSpec *dst_spec, void *buf, int len)
{
- const SDL_AudioFormat src_format = stream->src_spec.format;
- const int src_channels = stream->src_spec.channels;
- const int src_sample_frame_size = stream->src_sample_frame_size;
-
- const SDL_AudioFormat dst_format = stream->dst_spec.format;
- const int dst_channels = stream->dst_spec.channels;
- const int dst_sample_frame_size = stream->dst_sample_frame_size;
+ const SDL_AudioSpec *src_spec = &stream->chunks.spec;
+ const SDL_AudioFormat src_format = src_spec->format;
+ const int src_channels = src_spec->channels;
+ const int src_sample_frame_size = stream->chunks.src_sample_frame_size;
- const int max_sample_frame_size = stream->max_sample_frame_size;
+ const SDL_AudioFormat dst_format = dst_spec->format;
+ const int dst_channels = dst_spec->channels;
+ const int dst_sample_frame_size = (SDL_AUDIO_BITSIZE(dst_spec->format) / 8) * dst_spec->channels;
+ const Sint64 resample_rate = GetResampleRate(src_spec->freq, dst_spec->freq);
+ const int max_sample_frame_size = CalculateMaxSampleFrameSize(src_format, src_channels, dst_format, dst_channels);
const int resampler_padding_frames = stream->resampler_padding_frames;
- const Sint64 resample_rate = stream->resample_rate;
#if DEBUG_AUDIOSTREAM
SDL_Log("AUDIOSTREAM: asking for an output chunk of %d bytes.", len);
@@ -983,25 +1030,33 @@ static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int le
// Clamp the output length to the maximum currently available.
// The rest of this function assumes enough input data is available.
- const int max_available = SDL_GetAudioStreamAvailable(stream);
+ // This only handles a single chunk at a time, which is to say, it stops at
+ // the boundaries of source format changes, so we can dump finished chunks and
+ // adjust stream state for the next Get operation.
+ size_t max_available_padding;
+ size_t max_available = GetAudioStreamChunkFramesAvailable(&stream->chunks, dst_spec, stream->resample_offset, &max_available_padding);
+ if ((max_available_padding > 0) && !stream->flushed) {
+ max_available = (max_available < max_available_padding) ? 0 : (max_available - max_available_padding);
+ }
+
+SDL_Log("max_available==%d\n", (int) max_available);
- if (len > max_available) {
- len = max_available;
+ if (((size_t) len) > max_available) {
+ len = (int) max_available;
}
int output_frames = len / dst_sample_frame_size;
if (output_frames == 0) {
+ // !!! FIXME: if we're at the end of a chunk and we don't have enough to fill the resampler padding, we need to deal with that data in some way!
return 0; // nothing to do.
}
- int input_frames = output_frames;
const int output_bytes = output_frames * dst_sample_frame_size;
// Not resampling? It's an easy conversion (and maybe not even that!)
if (resample_rate == 0) {
- SDL_assert(input_frames == output_frames);
-
+ const int input_frames = output_frames;
Uint8* input_buffer = NULL;
// If no conversion is happening, read straight into the output buffer.
@@ -1021,6 +1076,9 @@ static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int le
const int bytes_read = (int) SDL_ReadFromDataQueue(stream->queue, input_buffer, input_bytes);
SDL_assert(bytes_read == input_bytes);
+ SDL_assert(((size_t) bytes_read) <= stream->chunks.num_bytes);
+ stream->chunks.num_bytes -= bytes_read;
+
// Even if we aren't currently resampling, we always need to update the history buffer
UpdateStreamHistoryBuffer(stream, input_buffer, input_bytes, NULL, 0);
@@ -1037,7 +1095,7 @@ static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int le
// Because resampling happens "between" frames, The same number of output_frames
// can require a different number of input_frames, depending on the resample_offset.
// Infact, input_frames can sometimes even be zero when upsampling.
- input_frames = GetResamplerNeededInputFrames(output_frames, resample_rate, stream->resample_offset);
+ const int input_frames = GetResamplerNeededInputFrames(output_frames, resample_rate, stream->resample_offset);
const int input_bytes = input_frames * src_sample_frame_size;
// If increasing channels, do it after resampling, since we'd just
@@ -1148,53 +1206,89 @@ static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int le
return output_bytes;
}
-// get converted/resampled data from the stream
-int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
+// you should hold stream->lock when calling this!
+static int RemoveCompletedAudioStreamChunk(SDL_AudioStream *stream)
{
+ if (stream->chunks.num_bytes == 0) { // finished with this one?
+ SDL_AudioStreamChunk *next = stream->chunks.next;
+ if (!next) {
+ SDL_assert(stream->chunks_tail == &stream->chunks);
+ SDL_zero(stream->chunks);
+ } else {
+ SDL_assert(next->num_bytes > 0); // we should not have added a zero-byte chunk.
+ if (SetAudioStreamFormat(stream, &next->spec, &stream->dst_spec) != 0) {
+ return -1;
+ }
+ SDL_copyp(&stream->chunks, next);
+ if (stream->chunks_tail == next) {
+ stream->chunks_tail = &stream->chunks;
+ }
+ SDL_free(next);
+ stream->resample_offset = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int GetAudioStreamDataInDstFormat(SDL_AudioStream *stream, void *voidbuf, int len, const SDL_AudioSpec *dst_spec)
+{
+ // these should have all been checked by the higher level
+ SDL_assert(stream != NULL);
+ SDL_assert(dst_spec->format != 0);
+ SDL_assert(voidbuf != NULL);
+ SDL_assert(len > 0);
+
Uint8 *buf = (Uint8 *) voidbuf;
#if DEBUG_AUDIOSTREAM
SDL_Log("AUDIOSTREAM: want to get %d converted bytes", len);
#endif
- if (stream == NULL) {
- return SDL_InvalidParamError("stream");
- } else if (buf == NULL) {
- return SDL_InvalidParamError("buf");
- } else if (len < 0) {
- return SDL_InvalidParamError("len");
- } else if (len == 0) {
- return 0; // nothing to do.
+ SDL_LockMutex(stream->lock);
+
+ if (RemoveCompletedAudioStreamChunk(stream) == -1) { // try up front, in case this failed when we had bytes to return.
+ SDL_UnlockMutex(stream->lock);
+ return -1;
+ } else if (stream->chunks.num_bytes == 0) {
+ SDL_UnlockMutex(stream->lock);
+ return 0;
+ } else if (SetAudioStreamFormat(stream, &stream->chunks.spec, dst_spec) == -1) { // make sure the buffers are allocated correctly and in the right format
+ SDL_UnlockMutex(stream->lock);
+ return -1;
}
- SDL_LockMutex(stream->lock);
+ const int dst_sample_frame_size = (SDL_AUDIO_BITSIZE(dst_spec->format) / 8) * dst_spec->channels;
- len -= len % stream->dst_sample_frame_size; // chop off any fractional sample frame.
+ len -= len % dst_sample_frame_size; // chop off any fractional sample frame.
// give the callback a chance to fill in more stream data if it wants.
if (stream->get_callback) {
- int approx_request = len / stream->dst_sample_frame_size; // start with sample frames desired
- if (stream->resample_rate) {
- approx_request = GetResamplerNeededInputFrames(approx_request, stream->resample_rate, stream->resample_offset);
+ // callback works in the app-specified format, not a specific chunk's.
+ const int src_sample_frame_size = (SDL_AUDIO_BITSIZE(stream->src_spec.format) / 8) * stream->src_spec.channels;
+ int approx_request = len / dst_sample_frame_size; // start with sample frames desired
+ const Sint64 resample_rate = GetResampleRate(stream->src_spec.freq, dst_spec->freq);
+ if (resample_rate) {
+ approx_request = GetResamplerNeededInputFrames(approx_request, resample_rate, stream->resample_offset);
if (!stream->flushed) { // do we need to fill the future buffer to accommodate this, too?
approx_request += stream->resampler_padding_frames;
}
}
- approx_request *= stream->src_sample_frame_size; // convert sample frames to bytes.
- const int already_have = SDL_GetAudioStreamAvailable(stream);
+ approx_request *= src_sample_frame_size; // convert sample frames to bytes.
+ const int already_have = GetAudioStreamAvailable(stream, dst_spec);
approx_request -= SDL_min(approx_request, already_have); // we definitely have this much output already packed in.
if (approx_request > 0) { // don't call the callback if we can satisfy this request with existing data.
stream->get_callback(stream->get_callback_userdata, stream, approx_request);
}
}
- const int chunk_size = stream->dst_sample_frame_size * 4096;
+ const int chunk_size = dst_sample_frame_size * 4096;
int retval = 0;
while (len > 0) { // didn't ask for a whole sample frame, nothing to do
- const int rc = GetAudioStreamDataInternal(stream, buf, SDL_min(len, chunk_size));
+ const int rc = GetAudioStreamDataInternal(stream, dst_spec, buf, SDL_min(len, chunk_size));
if (rc == -1) {
#if DEBUG_AUDIOSTREAM
@@ -1208,9 +1302,15 @@ int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
#if DEBUG_AUDIOSTREAM
SDL_Log("AUDIOSTREAM: output chunk ended up being %d bytes.", rc);
#endif
+
buf += rc;
len -= rc;
retval += rc;
+
+ if (RemoveCompletedAudioStreamChunk(stream) == -1) {
+ break; // we already have bytes to return, so don't fail here if this exploded.
+ }
+
if (rc < chunk_size) {
break;
}
@@ -1224,41 +1324,104 @@ int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
#endif
return retval;
+
}
-// number of converted/resampled bytes available
-int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream)
+
+// get converted/resampled data from the stream
+int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
{
- if (!stream) {
+ if (stream == NULL) {
return SDL_InvalidParamError("stream");
+ } else if (stream->src_spec.format == 0) {
+ return SDL_SetError("Audio stream's src format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ } else if (stream->dst_spec.format == 0) {
+ return SDL_SetError("Audio stream's dst format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ } else if (voidbuf == NULL) {
+ return SDL_InvalidParamError("buf");
+ } else if (len < 0) {
+ return SDL_InvalidParamError("len");
+ } else if (len == 0) {
+ return 0; // nothing to do.
}
+ return GetAudioStreamDataInDstFormat(stream, voidbuf, len, &stream->dst_spec);
+}
- SDL_LockMutex(stream->lock);
+// You should hold the chunk's stream's lock before calling this.
+static size_t GetAudioStreamChunkFramesAvailable(const SDL_AudioStreamChunk *chunk, const SDL_AudioSpec *dst_spec, Sint64 resample_offset, size_t *_padding)
+{
+ size_t padding = 0;
// total bytes available in source format in data queue
- size_t count = SDL_GetDataQueueSize(stream->queue);
+ size_t count = chunk->num_bytes;
+ if (count > 0) {
+ // total sample frames available in data queue
+ count /= chunk->src_sample_frame_size;
+
+ // sample frames after resampling
+ const Sint64 resample_rate = GetResampleRate(chunk->spec.freq, dst_spec->freq);
+ if (resample_rate) {
+ padding = (size_t) GetResamplerPaddingFrames(resample_rate);
+ count = GetResamplerAvailableOutputFrames(count, resample_rate, resample_offset);
+ }
+ }
+
+ *_padding = padding;
+ return count;
+}
+
+// This lets you calculate available audio bytes for any destination format. The higher level
+// always just uses the dst format of the stream itself, but internally we might use this to
+// bypass that.
+static int GetAudioStreamAvailable(SDL_AudioStream *stream, const SDL_AudioSpec *dst_spec)
+{
+ // the higher level should have made sure these were reasonable, so just assert here.
+ SDL_assert(stream != NULL);
+ SDL_assert(stream->src_spec.format != 0);
+ SDL_assert(dst_spec->format != 0);
+
+ size_t retval = 0;
+ size_t max_hold_back = 0;
- // total sample frames available in data queue
- count /= stream->src_sample_frame_size;
+ SDL_LockMutex(stream->lock);
- // sample frames after resampling
- if (stream->resample_rate) {
- if (!stream->flushed) {
- // have to save some samples for padding. They aren't available until more data is added or the stream is flushed.
- count = (count < ((size_t) stream->resampler_padding_frames)) ? 0 : (count - stream->resampler_padding_frames);
+ Sint64 resample_offset = stream->resample_offset;
+ for (SDL_AudioStreamChunk *chunk = &stream->chunks; chunk != NULL; chunk = chunk->next) {
+ size_t padding;
+ retval += GetAudioStreamChunkFramesAvailable(chunk, dst_spec, resample_offset, &padding);
+ if (padding > max_hold_back) {
+ max_hold_back = padding;
}
+ resample_offset = 0;
+ }
- count = GetResamplerAvailableOutputFrames(count, stream->resample_rate, stream->resample_offset);
+ // have to save some samples for padding. They aren't available until more data is added or the stream is flushed.
+ if (!stream->flushed && (max_hold_back > 0)) {
+ retval = (retval < max_hold_back) ? 0 : (retval - max_hold_back);
}
// convert from sample frames to bytes in destination format.
- count *= stream->dst_sample_frame_size;
+ const int dst_sample_frame_size = (SDL_AUDIO_BITSIZE(dst_spec->format) / 8) * dst_spec->channels;
+ retval *= dst_sample_frame_size;
SDL_UnlockMutex(stream->lock);
// if this overflows an int, just clamp it to a maximum.
const int max_int = 0x7FFFFFFF; // !!! FIXME: This will blow up on weird processors. Is there an SDL_INT_MAX?
- return (count >= ((size_t) max_int)) ? max_int : ((int) count);
+ return (retval >= ((size_t) max_int)) ? max_int : ((int) retval);
+}
+
+// number of converted/resampled bytes available
+int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream)
+{
+ if (!stream) {
+ return SDL_InvalidParamError("stream");
+ } else if (stream->src_spec.format == 0) {
+ return SDL_SetError("Audio stream's src format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ } else if (stream->dst_spec.format == 0) {
+ return SDL_SetError("Audio stream's dst format hasn't been set yet"); // you should have either set this during SDL_CreateAudioStream or called SDL_SetAudioStreamFormat.
+ }
+ return GetAudioStreamAvailable(stream, &stream->dst_spec);
}
int SDL_ClearAudioStream(SDL_AudioStream *stream)
diff --git a/src/audio/SDL_sysaudio.h b/src/audio/SDL_sysaudio.h
index 81104e594..9ae5e0393 100644
--- a/src/audio/SDL_sysaudio.h
+++ b/src/audio/SDL_sysaudio.h
@@ -154,6 +154,16 @@ typedef struct SDL_AudioDriver
SDL_AtomicInt shutting_down; // non-zero during SDL_Quit, so we known not to accept any last-minute device hotplugs.
} SDL_AudioDriver;
+// Different puts to an audio stream can have different formats (put a WAV file, changed format, pushed another WAV file).
+// This struct keeps track of when those changes happen, internally.
+typedef struct SDL_AudioStreamChunk
+{
+ SDL_AudioSpec spec;
+ size_t num_bytes;
+ int src_sample_frame_size;
+ struct SDL_AudioStreamChunk *next;
+} SDL_AudioStreamChunk;
+
struct SDL_AudioStream
{
SDL_DataQueue *queue;
@@ -175,20 +185,20 @@ struct SDL_AudioStream
int resampler_padding_frames;
int history_buffer_frames;
+ // these specs are what the app has requested, but aren't necessarily what the current chunk
+ // is using for source format or what we might use internally for destination format!
SDL_AudioSpec src_spec;
SDL_AudioSpec dst_spec;
- Sint64 resample_rate;
Sint64 resample_offset;
- int src_sample_frame_size;
- int dst_sample_frame_size;
- int max_sample_frame_size;
-
int packetlen;
SDL_bool simplified; // SDL_TRUE if created via SDL_OpenAudioDeviceStream
+ SDL_AudioStreamChunk chunks; // tracks format changes. Actual data is in `queue`.
+ SDL_AudioStreamChunk *chunks_tail;
+
SDL_LogicalAudioDevice *bound_device;
SDL_AudioStream *next_binding;
SDL_AudioStream *prev_binding;
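
As a rough illustration of the bookkeeping the new SDL_AudioStreamChunk list performs: the put path appends to (or extends) the tail chunk keyed by source spec, and the get path drains only the head chunk, adopting the next chunk's spec once it empties. The following simplified standalone model is not the SDL code; names like Spec, Chunk, track_put and track_get are illustrative only. In the real patch the bytes themselves live in the stream's SDL_DataQueue, and the converter is reconfigured via SetAudioStreamFormat() when a chunk is retired.

#include <stdlib.h>
#include <string.h>

typedef struct Spec { int format, channels, freq; } Spec;

typedef struct Chunk
{
    Spec spec;           // source spec for this span of queued bytes
    size_t num_bytes;    // how many queued bytes still use this spec
    struct Chunk *next;
} Chunk;

typedef struct Stream
{
    Chunk head;          // first chunk lives inline, like stream->chunks
    Chunk *tail;         // like stream->chunks_tail (initially &head)
} Stream;

static int same_spec(const Spec *a, const Spec *b)
{
    return (a->format == b->format) && (a->channels == b->channels) && (a->freq == b->freq);
}

// Put path: extend the tail chunk if the spec matches, otherwise append a new one.
static int track_put(Stream *s, const Spec *spec, size_t len)
{
    if (!same_spec(&s->tail->spec, spec)) {
        Chunk *c;
        if (s->tail->spec.format == 0) {   // first put into an empty stream reuses the head
            c = s->tail;
        } else {
            c = (Chunk *) calloc(1, sizeof (Chunk));
            if (!c) {
                return -1;
            }
            s->tail->next = c;
            s->tail = c;
        }
        c->spec = *spec;
    }
    s->tail->num_bytes += len;
    return 0;
}

// Get path: only the head chunk is drained; when it empties, the next chunk's spec
// takes over (this is where the real code calls SetAudioStreamFormat and resets
// the resample offset).
static void track_get(Stream *s, size_t consumed)
{
    s->head.num_bytes -= consumed;
    if (s->head.num_bytes == 0) {
        Chunk *next = s->head.next;
        if (!next) {
            memset(&s->head, 0, sizeof (Chunk));  // stream is empty again
        } else {
            if (s->tail == next) {
                s->tail = &s->head;
            }
            s->head = *next;
            free(next);
        }
    }
}
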