@t-mat
Created December 18, 2014 16:57
WIN32 : WASAPI - Rendering a Stream
// WASAPI : Rendering a Stream
// http://msdn.microsoft.com/en-us/library/dd316756%28v=VS.85%29.aspx
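//
// Plays a 440 Hz sine tone on the default render endpoint in shared,
// event-driven mode until a key event arrives on the console.
// Error handling is reduced to assert() to keep the sample short.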
#define _USE_MATH_DEFINES
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdio.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#pragma comment(lib, "winmm.lib")
#include <assert.h>
#include <vector>
#include <cmath>
int main() {
    CoInitializeEx(nullptr, COINIT_MULTITHREADED);
    const int hnsBufferDuration = 30 * 10000;   // 30 ms, in 100-nanosecond (REFERENCE_TIME) units
    HRESULT hr;
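    // Get the default playback endpoint (eRender / eMultimedia) from the MMDevice API.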
    IMMDevice* pDevice = nullptr;
    {
        IMMDeviceEnumerator* mmDeviceEnumerator = nullptr;
        hr = CoCreateInstance(
            __uuidof(MMDeviceEnumerator)
            , nullptr
            , CLSCTX_INPROC_SERVER
            , IID_PPV_ARGS(&mmDeviceEnumerator)
        );
        assert(SUCCEEDED(hr));
        hr = mmDeviceEnumerator->GetDefaultAudioEndpoint(
            eRender, eMultimedia, &pDevice);
        assert(SUCCEEDED(hr));
        mmDeviceEnumerator->Release();
    }
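    // Activate an IAudioClient interface on the endpoint.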
    IAudioClient* pAudioClient = nullptr;
    hr = pDevice->Activate(
        __uuidof(IAudioClient)
        , CLSCTX_INPROC_SERVER
        , nullptr
        , reinterpret_cast<void**>(&pAudioClient)
    );
    assert(SUCCEEDED(hr));
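    // Copy the shared-mode mix format into our own storage; GetMixFormat() may
    // return a WAVEFORMATEXTENSIBLE, so keep the trailing cbSize bytes too.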
    std::vector<char> mixFormatBuf;
    {
        WAVEFORMATEX* p = nullptr;
        pAudioClient->GetMixFormat(&p);
        mixFormatBuf.resize(sizeof(*p) + p->cbSize);
        memcpy(mixFormatBuf.data(), p, mixFormatBuf.size());
        CoTaskMemFree(p);
    }
    auto* const pMixFormat = reinterpret_cast<const WAVEFORMATEX*>(mixFormatBuf.data());
    auto* const pMixFormatEx = [pMixFormat]() -> const WAVEFORMATEXTENSIBLE* {
        if(pMixFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
            return reinterpret_cast<const WAVEFORMATEXTENSIBLE*>(pMixFormat);
        }
        return nullptr;
    }();
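    // Initialize the stream: shared mode, event-driven buffering, ~30 ms buffer,
    // using the engine's mix format as-is.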
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED
        , AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST
        , hnsBufferDuration
        , 0
        , pMixFormat
        , nullptr
    );
    assert(SUCCEEDED(hr));
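    // Services on the initialized stream: IAudioRenderClient to write samples,
    // IAudioClock to read the stream position.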
    IAudioRenderClient* pRenderClient = nullptr;
    hr = pAudioClient->GetService(__uuidof(IAudioRenderClient), reinterpret_cast<void**>(&pRenderClient));
    assert(SUCCEEDED(hr));
    IAudioClock* pAudioClock = nullptr;
    hr = pAudioClient->GetService(__uuidof(IAudioClock), reinterpret_cast<void**>(&pAudioClock));
    assert(SUCCEEDED(hr));
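    // Auto-reset event that the engine signals whenever it wants more data,
    // plus the buffer size (in frames) the engine actually allocated.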
    //
    HANDLE hRefillEvent = CreateEventEx(nullptr, nullptr, 0, EVENT_MODIFY_STATE | SYNCHRONIZE);
    UINT32 bufferFrameCount = 0;
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
    assert(SUCCEEDED(hr));
    hr = pAudioClient->SetEventHandle(hRefillEvent);
    assert(SUCCEEDED(hr));
    { // Initial zero fill
        BYTE* data = nullptr;
        hr = pRenderClient->GetBuffer(bufferFrameCount, &data);
        assert(SUCCEEDED(hr));
        hr = pRenderClient->ReleaseBuffer(bufferFrameCount, AUDCLNT_BUFFERFLAGS_SILENT);
        assert(SUCCEEDED(hr));
    }
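    // Shared-mode mix formats are typically 32-bit float; this sample only
    // handles that case (hence the assert below).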
    const auto bytesPerFrame = pMixFormat->nBlockAlign;
    const auto nChannels = pMixFormat->nChannels;
    const auto bytesPerSample = pMixFormat->wBitsPerSample / 8;
    const auto bFloatSample = [pMixFormat, pMixFormatEx]() {
        return pMixFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT
            || (pMixFormatEx && pMixFormatEx->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT);
    }();
    assert(bFloatSample);
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    HANDLE events[2] = { hStdin, hRefillEvent };
    float phase = 0.0f;
    const auto dPhase = static_cast<float>(440.0f * 2.0f * M_PI / pMixFormat->nSamplesPerSec);
    pAudioClient->Start();
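    // Main loop: wait for either console input (any key event quits) or the
    // refill event; on refill, report the stream clock and top up the buffer.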
    for(bool run = true; run; ) {
        const auto r = WaitForMultipleObjects(_countof(events), events, FALSE, INFINITE);
        switch(r) {
        default:
            break;
        case WAIT_OBJECT_0: // hStdin
            {
                INPUT_RECORD ir;
                DWORD dw;
                ReadConsoleInput(hStdin, &ir, 1, &dw);
                if(ir.EventType == KEY_EVENT) {
                    run = false;
                }
            }
            break;
        case WAIT_OBJECT_0 + 1: // hRefillEvent
            {
                UINT64 freq = 0;
                pAudioClock->GetFrequency(&freq);
                UINT64 position = 0;
                pAudioClock->GetPosition(&position, nullptr);
                double sec = static_cast<double>(position) / static_cast<double>(freq);
                printf("AudioClock : freq=%lluHz, pos=%10llu, %10.5fsec\r", freq, position, sec);
            }
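            // Refill: frames still queued in the buffer are reported as "padding";
            // fill the remaining space with the sine wave, one float per channel.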
            {
                UINT32 paddingFrameCount = 0;
                pAudioClient->GetCurrentPadding(&paddingFrameCount);
                const auto availableFrameCount = bufferFrameCount - paddingFrameCount;
                BYTE* data = nullptr;
                pRenderClient->GetBuffer(availableFrameCount, &data);
                auto* fData = reinterpret_cast<float*>(data);
                for(UINT32 iFrame = 0; iFrame < availableFrameCount; ++iFrame) {
                    const auto v = sinf(phase) * 0.25f;
                    for(int iChannel = 0; iChannel < nChannels; ++iChannel) {
                        *fData++ = v;
                    }
                    phase += dPhase;
                }
                pRenderClient->ReleaseBuffer(availableFrameCount, 0);
            }
            break;
        }
    }
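    // Teardown: stop the stream and release the COM objects.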
    printf("\n");
    pAudioClient->Stop();
    pAudioClock->Release();
    pRenderClient->Release();
    pAudioClient->Release();
    pDevice->Release();
    CloseHandle(hRefillEvent);  // release the refill event handle
    CoUninitialize();
}