Steam Audio example
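// Build note (an assumption, not part of the original gist): the program expects
// INPUT_FILE to be defined as a quoted path to a headerless mono 32-bit float PCM
// file at 44100 Hz, and it must be linked against the Steam Audio runtime library
// (phonon). A command line might look roughly like:
//   g++ -std=c++20 main.cpp -I<steamaudio_sdk>/include -L<steamaudio_sdk>/lib \
//       -lphonon -DINPUT_FILE='"inputaudio.raw"' -o steamaudio_example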
#include <iostream>
#include <fstream>
#include <vector>
#include <cstdint>
#include <algorithm> // std::copy
#include <iterator>  // std::back_inserter
#include <phonon.h>

std::vector<float> load_input_audio(const char *filename) {
    std::ifstream file(filename, std::ios::binary);
    file.seekg(0, std::ios::end);
    auto filesize = file.tellg();
    auto numsamples = static_cast<int>(filesize / sizeof(float));
    std::vector<float> fres(numsamples);
    file.seekg(0, std::ios::beg);
    // Read only whole samples so we never write past the end of the vector
    file.read(reinterpret_cast<char*>(fres.data()), numsamples * sizeof(float));
    return fres;
}

void save_output_audio(const char *filename, const std::vector<float>& outputaudio) {
    std::ofstream file(filename, std::ios::binary);
    file.write(reinterpret_cast<const char*>(outputaudio.data()), outputaudio.size() * sizeof(float));
}

int main() {
    // Create the context
    IPLContextSettings contextSettings {
        .version = STEAMAUDIO_VERSION
    };
    IPLContext context = nullptr;
    iplContextCreate(&contextSettings, &context);
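    // Minimal sanity check (an addition, not in the original): iplContextCreate
    // reports failure through its IPLerror return value, which is discarded above,
    // so at least make sure the handle was actually filled in before continuing.
    if (!context) {
        std::cerr << "Failed to create Steam Audio context" << std::endl;
        return 1;
    }
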
    // Typical audio settings...
    IPLAudioSettings audioSettings {
        .samplingRate = 44100,
        .frameSize = 1024
    };

    // Create the HRTF
    // The HRTF basically describes the "set of filters that is applied to audio in order to spatialize it"
    IPLHRTFSettings hrtfSettings {
        .type = IPL_HRTFTYPE_DEFAULT,
        .volume = 1.0f
    };
    IPLHRTF hrtf = nullptr;
    iplHRTFCreate(context, &audioSettings, &hrtfSettings, &hrtf);
    // Now we create the binaural effect itself
    IPLBinauralEffectSettings effectSettings {
        .hrtf = hrtf
    };
    IPLBinauralEffect effect = nullptr;
    iplBinauralEffectCreate(context, &audioSettings, &effectSettings, &effect);
    // Create local buffers
    // The output is stereo, so its buffer must be twice the size
    std::vector<float> inputaudio = load_input_audio(INPUT_FILE);
    std::vector<float> outputaudio;
    outputaudio.reserve(2 * inputaudio.size());

    // Create tiny frame buffer
    // This buffer holds one individual audio frame at a time.
    // In fact, it holds 2 channels' worth of samples because the output is stereo.
    std::vector<float> outputaudioframe(2 * audioSettings.frameSize);
    // Create a pointer to the beginning of input data
    float *inData = inputaudio.data();

    // Now we can pass that buffer to phonon
    IPLAudioBuffer inBuffer {
        .numChannels = 1,
        .numSamples = audioSettings.frameSize,
        .data = &inData
    };
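    // Note (clarifying comment, not in the original): `data` is an array of
    // per-channel sample pointers. The input is mono, so we pass the address of
    // our single `inData` pointer.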
    // And we need to let it allocate its own output buffer
    // That buffer is "deinterleaved", which means that it's actually one buffer for each channel
    IPLAudioBuffer outBuffer {};
    iplAudioBufferAllocate(context, 2 /* stereo output! */, audioSettings.frameSize, &outBuffer);
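    // After allocation, outBuffer.data[0] and outBuffer.data[1] point to the two
    // per-channel sample arrays (left and right); this illustrates the
    // deinterleaved layout described above.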
    // Calculate the total number of audio frames in the input (any trailing partial frame is dropped)
    const int numframes = inputaudio.size() / audioSettings.frameSize;

    // Now just iteratively repeat until we're done!
    // The nice thing about the way it's done is that this allows streaming (which is important for video games, which play audio dynamically)
    for (int i = 0; i != numframes; ++i) {
        // Define spatialization effect parameters
        IPLBinauralEffectParams effectParams {
            .direction = {1.0f, 1.0f, 1.0f},
            .interpolation = IPL_HRTFINTERPOLATION_NEAREST,
            .spatialBlend = 1.0f,
            .hrtf = hrtf
        };
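        // Note (an assumption based on Steam Audio's documented conventions, not
        // part of the original): `direction` is the direction from the listener to
        // the source in the listener's own coordinate space, with +x right, +y up
        // and -z forward, so {1, 1, 1} places the source up, to the right and
        // behind the listener.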
        // And actually do the magic!
        iplBinauralEffectApply(effect, &effectParams, &inBuffer, &outBuffer);

        // Interleave the output data
        // This means that we combine the 2 channels into one buffer, which makes it easier to save the output to a file later
        iplAudioBufferInterleave(context, &outBuffer, outputaudioframe.data());
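        // outputaudioframe now holds the samples interleaved as L0, R0, L1, R1, ...
        // (the usual interleaved PCM layout)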
        // Copy the interleaved data into our final output buffer
        std::copy(std::begin(outputaudioframe), std::end(outputaudioframe), std::back_inserter(outputaudio));

        // Advance the input to the next frame
        // This simply increments the pointer to the beginning of the next frame
        // Since `inBuffer` has a pointer to this pointer, this will cause it to read the next frame
        inData += audioSettings.frameSize;
    }
    // Save the output for inspection
    save_output_audio("outputaudio.raw", outputaudio);
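    // Playback hint (an assumption about tooling, not part of the original): the
    // file is headerless interleaved stereo 32-bit float PCM at 44100 Hz, which
    // e.g. Audacity can open via File > Import > Raw Data with those settings.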
    // And clean up...
    iplAudioBufferFree(context, &outBuffer);
    iplBinauralEffectRelease(&effect);
    iplHRTFRelease(&hrtf);
    iplContextRelease(&context);
}