namespace SKAudioGraph
{
    using StereoKit;
    using System;
    using System.Runtime.InteropServices;
    using System.Threading.Tasks;
    using Windows.Foundation;
    using Windows.Media;
    using Windows.Media.Audio;
    using Windows.Media.MediaProperties;
    using Windows.Media.Render;

    // We are declaring a COM interface for use within the namespace.
    // This interface allows access to memory at the byte level, which we need
    // in order to populate the audio data we generate.
    [ComImport]
    [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
    [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
    unsafe interface IMemoryBufferByteAccess
    {
        void GetBuffer(out byte* buffer, out uint capacity);
    }
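
    // Note: the GUID above is not arbitrary; it is the published IID of
    // IMemoryBufferByteAccess, documented alongside Windows.Foundation.MemoryBuffer,
    // so the cast from IMemoryBufferReference in GenerateAudioData below resolves
    // to this interface at the COM level.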
    internal class Program
    {
        private static AudioGraph graph;
        private static AudioDeviceOutputNode deviceOutputNode;
        private static AudioFrameInputNode frameInputNode;
        private static double theta = 0;

        static void Main(string[] args)
        {
            // Initialize StereoKit
            SKSettings settings = new SKSettings
            {
                appName = "SKAudioGraph",
                assetsFolder = "Assets",
            };
            if (!SK.Initialize(settings))
                Environment.Exit(1);

            Vec3 menuPosition = Input.Head.position + Input.Head.Forward * 0.6f + Vec3.Right * 0.15f;
            Pose menuPose = new Pose(menuPosition, Quat.LookAt(menuPosition, Input.Head.position));
            Vec2 menuSize = new Vec2(20, 10) * U.cm;
            string audioText = "Generate Audio";

            // Block until the audio graph is fully constructed before entering
            // the render loop, since the UI below uses the frame input node.
            CreateAudioGraph().GetAwaiter().GetResult();
            // Core application loop
            while (SK.Step(() =>
            {
                UI.WindowBegin("Local Playback Menu", ref menuPose, menuSize);

                // Toggle tone generation by starting/stopping the frame input node
                if (UI.Button(audioText))
                {
                    if (audioText.Equals("Generate Audio"))
                    {
                        frameInputNode.Start();
                        audioText = "Stop";
                    }
                    else
                    {
                        frameInputNode.Stop();
                        audioText = "Generate Audio";
                    }
                }

                if (UI.Button("Exit"))
                {
                    SK.Quit();
                }

                UI.WindowEnd();
            })) { }

            SK.Shutdown();
        }
        private static async Task CreateAudioGraph()
        {
            // Create an AudioGraph with default settings
            AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Media);
            CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
            if (result.Status != AudioGraphCreationStatus.Success)
            {
                // Cannot create graph
                Log.Info(String.Format("AudioGraph creation failed: {0}", result.Status.ToString()));
                return;
            }
            graph = result.Graph;

            // Create a device output node
            CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();
            if (deviceOutputNodeResult.Status != AudioDeviceNodeCreationStatus.Success)
            {
                // Cannot create device output node; bail out rather than
                // dereferencing a null node below.
                Log.Info(String.Format("Audio device output unavailable: {0}", deviceOutputNodeResult.Status.ToString()));
                return;
            }
            deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;
            Log.Info("Device output node successfully created");

            // Create the frame input node with the same format as the graph,
            // except explicitly set to mono.
            AudioEncodingProperties nodeEncodingProperties = graph.EncodingProperties;
            nodeEncodingProperties.ChannelCount = 1;
            frameInputNode = graph.CreateFrameInputNode(nodeEncodingProperties);
            frameInputNode.AddOutgoingConnection(deviceOutputNode);

            // Initialize the frame input node in the stopped state
            frameInputNode.Stop();

            // Hook up an event handler so we can start generating samples when needed.
            // This event is triggered when the node is required to provide data.
            frameInputNode.QuantumStarted += node_QuantumStarted;

            // Start the graph; from here on we only start/stop the frame input node
            graph.Start();
        }
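
        // The graph above uses default quantum sizing. If lower output latency is
        // needed, AudioGraphSettings can request the smallest quantum the device
        // supports; a sketch of that option (not what the original gist does):
        //
        //   settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;
        //
        // set before AudioGraph.CreateAsync(settings) is called.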
        private static void node_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            // GenerateAudioData can provide PCM audio data by directly synthesizing
            // it, or by reading from a file (see the sketch after this method).
            // We need to know how many samples are required; in this case, the node
            // runs at the same rate as the rest of the graph.
            // For minimum latency, provide only the required number of samples;
            // extra samples introduce additional latency.
            uint numSamplesNeeded = (uint)args.RequiredSamples;
            if (numSamplesNeeded != 0)
            {
                AudioFrame audioData = GenerateAudioData(numSamplesNeeded);
                frameInputNode.AddFrame(audioData);
            }
        }
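
        // As noted above, audio can also come from a file rather than being
        // synthesized. A minimal sketch of that route; this helper and the idea of
        // passing in an already-opened StorageFile are illustrative assumptions,
        // not part of the original gist:
        private static async Task AddFileInputAsync(Windows.Storage.StorageFile file)
        {
            // Decode the file into an input node owned by the existing graph
            CreateAudioFileInputNodeResult fileResult = await graph.CreateFileInputNodeAsync(file);
            if (fileResult.Status != AudioFileNodeCreationStatus.Success)
            {
                Log.Info(String.Format("File input node unavailable: {0}", fileResult.Status.ToString()));
                return;
            }
            // Route the decoded audio straight to the device output node
            fileResult.FileInputNode.AddOutgoingConnection(deviceOutputNode);
        }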
        private static unsafe AudioFrame GenerateAudioData(uint samples)
        {
            // Buffer size is (number of samples) * (size of each sample).
            // We generate single-channel (mono) audio; for multi-channel,
            // multiply by the number of channels.
            uint bufferSize = samples * sizeof(float);
            AudioFrame frame = new AudioFrame(bufferSize);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint capacityInBytes;
                float* dataInFloat;

                // Get the raw buffer from the AudioFrame
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                // Cast to float since the data we are generating is float
                dataInFloat = (float*)dataInBytes;

                float freq = 1000; // choosing to generate a frequency of 1 kHz
                float amplitude = 0.3f;
                int sampleRate = (int)graph.EncodingProperties.SampleRate;
                double sampleIncrement = (freq * (Math.PI * 2)) / sampleRate;

                // Generate a 1 kHz sine wave and populate the values in the memory buffer
                for (int i = 0; i < samples; i++)
                {
                    double sinValue = amplitude * Math.Sin(theta);
                    dataInFloat[i] = (float)sinValue;
                    theta += sampleIncrement;
                }

                // Keep theta bounded so Math.Sin precision does not degrade over long runs
                theta %= Math.PI * 2;
            }
            return frame;
        }
    }
}
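
// Build notes (assumptions, not stated in the original gist): a plausible setup
// is a .NET console project referencing the StereoKit NuGet package, targeting a
// Windows-specific framework so the Windows.Media.Audio WinRT APIs are projected,
// e.g. <TargetFramework>net6.0-windows10.0.19041.0</TargetFramework>, plus
// <AllowUnsafeBlocks>true</AllowUnsafeBlocks> for the pointer code above.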