Skip to content

Instantly share code, notes, and snippets.

@floere
Last active January 24, 2021 23:21
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save floere/be4b9bd309d586dc43cb865493e076ce to your computer and use it in GitHub Desktop.
Simple spatializing sound system (Unity 2020.1.0b, using the DSPGraph Audio Framework 0.1.0-preview.11)
// Plays every queued OneShot sound positioned relative to the audio listener.
// NOTE(review): `audioListener`, `audioSystem`, `OneShot`, and `Sounds.Id` are
// defined outside this snippet, and `var audioClip = ...` is a placeholder —
// this excerpt is illustrative and not compilable as-is.
protected override void OnUpdate()
{
// Capture the listener position on the main thread before entering the lambda.
var audioListenerTranslation = audioListener.transform.position;
Entities.
ForEach(
(Entity entity, ref OneShot oneShot, in Translation translation) =>
{
// An enum id identifying the sound to be played.
var soundsId = oneShot.Value;
if (soundsId == Sounds.Id.None)
{
return;
}
var audioClip = ... // Get AudioClip from soundsId.
// Play audio relative to the listener.
var relativeTranslation = translation.Value - (float3) audioListenerTranslation;
// Use memoized audioSystem to play one shot.
audioSystem.playOneShot(audioClip, relativeTranslation);
}
).
WithName("OneShotSystem").
// WithoutBurst: the lambda touches managed objects (AudioClip, audioSystem).
WithoutBurst().
Run();
}
using System;
using System.Collections.Generic;
using BUD.Audio.Filters;
using Unity.Entities;
using Unity.Audio;
using UnityEngine;
using Unity.Mathematics;
namespace BUD.Systems
{
// Simple spatializing sound system on top of DSPGraph. Builds, per one-shot,
// a PlayClip -> Spatializer -> Lowpass -> Root node chain and recycles the
// chain once the clip reports that it has stopped.
public class AudioSystem : SystemBase
{
    private const float MIN_ATTENUATION = 0.1f;
    private const float MAX_ATTENUATION = 1f;

    // Ears are 2 metres apart (-1 - +1).
    private const int MID_TO_EAR_DISTANCE = 1;
    private const int SPEED_OF_SOUND_M_PER_S = 343;

    // Event posted back from PlayClipKernel when a clip has finished playing.
    public struct ClipStopped { }

    private DSPGraph graph;
    // Pool of PlayClip nodes that finished playing and can be reused.
    private List<DSPNode> freeNodes;
    // PlayClip nodes currently playing a clip.
    private List<DSPNode> playingNodes;
    // PlayClip node -> its spatializer node (see diagram in getFreeNode).
    private Dictionary<DSPNode, DSPNode> clipToSpatializerMap;
    // PlayClip node -> its clip-to-spatializer connection (carries attenuation).
    private Dictionary<DSPNode, DSPConnection> clipToConnectionMap;
    // PlayClip node -> its lowpass filter node (distance-based muffling).
    private Dictionary<DSPNode, DSPNode> clipToLowpassMap;
    // Every connection ever made; disconnected in OnDestroy.
    private List<DSPConnection> connections;
    private AudioOutputHandle output;
    private int handlerID;

    protected override void OnCreate()
    {
        freeNodes = new List<DSPNode>();
        playingNodes = new List<DSPNode>();
        clipToSpatializerMap = new Dictionary<DSPNode, DSPNode>();
        clipToConnectionMap = new Dictionary<DSPNode, DSPConnection>();
        clipToLowpassMap = new Dictionary<DSPNode, DSPNode>();
        connections = new List<DSPConnection>();

        // Mirror the project's audio output settings in the graph.
        var format = ChannelEnumConverter.GetSoundFormatFromSpeakerMode(AudioSettings.speakerMode);
        var channels = ChannelEnumConverter.GetChannelCountFromSoundFormat(format);
        AudioSettings.GetDSPBufferSize(out var bufferLength, out var numBuffers);
        var sampleRate = AudioSettings.outputSampleRate;
        graph = DSPGraph.Create(format, channels, bufferLength, sampleRate);
        if (!graph.Valid)
        {
            Debug.Log("DSPGraph not valid!");
            return;
        }
        var driver = new DefaultDSPGraphDriver { Graph = graph };
        output = driver.AttachToDefaultOutput();

        // Add an event handler delegate to the graph for ClipStopped. So we are notified
        // of when a clip is stopped in the node and can handle the resources on the main thread.
        handlerID = graph.AddNodeEventHandler<ClipStopped>(
            (node, evt) =>
            {
                playingNodes.Remove(node);
                freeNodes.Add(node);
            }
        );
        SetupGraph(channels);
    }

    // All async interaction with the graph must be done through a DSPCommandBlock,
    // fired off atomically to the mixer thread on Complete(). Nothing is
    // pre-built here currently; node chains are created lazily in getFreeNode.
    protected void SetupGraph(int channels)
    {
        var block = graph.CreateCommandBlock();
        block.Complete();
    }

    // Play a one shot positioned relative to the listener.
    // 1. Get free node (chain).
    // 2. Set up playclip params.
    // 3. Set up spatializer params.
    // 4. Set connection attenuation.
    // 5. Set lowpass filter.
    public void playOneShot(AudioClip audioClip, float3 relativeTranslation)
    {
        var block = graph.CreateCommandBlock();
        var clipNode = getFreeNode(block, graph.OutputChannelCount);

        // Decide on playback rate here by taking the provider input rate and
        // the output settings of the system.
        var resampleRate = (float) audioClip.frequency / AudioSettings.outputSampleRate;
        block.SetFloat<PlayClipKernel.Parameters, PlayClipKernel.SampleProviders, PlayClipKernel>(
            clipNode,
            PlayClipKernel.Parameters.Rate,
            resampleRate
        );

        // Assign the sample provider to the slot of the node.
        block.SetSampleProvider<PlayClipKernel.Parameters, PlayClipKernel.SampleProviders, PlayClipKernel>(
            audioClip,
            clipNode,
            PlayClipKernel.SampleProviders.DefaultSlot
        );

        // Spatializer: delay one channel based on which side the sound is on.
        clipToSpatializerMap.TryGetValue(clipNode, out DSPNode spatializerNode);
        var channel = relativeTranslation.x < 0
            ? SpatializerKernel.Channels.Left
            : SpatializerKernel.Channels.Right;
        // Interaural delay: difference of the distances to each ear, in samples.
        var distanceA = math.length(relativeTranslation + new float3(-MID_TO_EAR_DISTANCE, 0, 0));
        var distanceB = math.length(relativeTranslation + new float3(+MID_TO_EAR_DISTANCE, 0, 0));
        var diff = math.abs(distanceA - distanceB);
        var sampleRatePerChannel = graph.SampleRate / graph.OutputChannelCount;
        var samples = diff * sampleRatePerChannel / SPEED_OF_SOUND_M_PER_S;
        block.SetFloat<SpatializerKernel.Parameters, SpatializerKernel.SampleProviders, SpatializerKernel>(
            spatializerNode,
            SpatializerKernel.Parameters.Channel,
            (float) channel
        );
        block.SetFloat<SpatializerKernel.Parameters, SpatializerKernel.SampleProviders, SpatializerKernel>(
            spatializerNode,
            SpatializerKernel.Parameters.Samples,
            samples
        );

        // Attenuation: inverse distance; anything inside 10m has no attenuation.
        clipToConnectionMap.TryGetValue(clipNode, out DSPConnection connection);
        var closestDistance = math.min(distanceA, distanceB);
        var closestInside10mCircle = math.max(closestDistance - 9, 1);
        block.SetAttenuation(connection, math.clamp(1 / closestInside10mCircle, MIN_ATTENUATION, MAX_ATTENUATION));

        // Lowpass: cutoff drops with distance; fully open inside the 10m circle.
        clipToLowpassMap.TryGetValue(clipNode, out DSPNode lowpassFilterNode);
        block.SetFloat<Filter.AudioKernel.Parameters, Filter.AudioKernel.Providers, Filter.AudioKernel>(
            lowpassFilterNode,
            Filter.AudioKernel.Parameters.Cutoff,
            math.clamp(
                (1 / closestInside10mCircle) * sampleRatePerChannel,
                1000,
                sampleRatePerChannel
            )
        );

        // Kick off playback.
        block.UpdateAudioKernel<PlayClipKernelUpdate, PlayClipKernel.Parameters, PlayClipKernel.SampleProviders, PlayClipKernel>(
            new PlayClipKernelUpdate(),
            clipNode
        );
        block.Complete();
    }

    // Return a free PlayClip node (already added to playingNodes), creating a
    // full chain when the pool is empty. The structure that is set up:
    //
    //   PlayClip ──▶ Spatializer ──▶ Lowpass ──▶ Root
    //       │             ▲              ▲
    //       ├── clipToSpatializerMap ────┘ (clipToLowpassMap)
    //       └── clipToConnectionMap (the PlayClip->Spatializer connection)
    protected DSPNode getFreeNode(DSPCommandBlock block, int channels)
    {
        // FIX: the original indexed freeNodes[0] inside a try/catch and used
        // the resulting exception as control flow; an explicit count check
        // avoids throwing/catching on the common "pool empty" path.
        if (freeNodes.Count > 0)
        {
            var pooled = freeNodes[0];
            freeNodes.RemoveAt(0);
            playingNodes.Add(pooled);
            return pooled;
        }

        // No node is available. Create a new chain.
        var node = createPlayClipNode(block, channels);
        var spatializerNode = createSpatializerNode(block, channels);
        playingNodes.Add(node);
        clipToSpatializerMap.Add(node, spatializerNode);
        // Used for directional sound.
        var nodeSpatializerConnection = connect(block, inNode: node, outNode: spatializerNode);
        clipToConnectionMap.Add(node, nodeSpatializerConnection);
        // Lowpass based on distance.
        var lowpassFilterNode = createLowpassFilterNode(block, 1000, channels);
        clipToLowpassMap.Add(node, lowpassFilterNode);
        // Insert lowpass filter node between spatializer and root node.
        connect(block, inNode: spatializerNode, outNode: lowpassFilterNode);
        connect(block, inNode: lowpassFilterNode, outNode: graph.RootDSP);
        return node;
    }

    // Connect inNode's port 0 to outNode's port 0 (root node when outNode is
    // null) and remember the connection for teardown.
    private DSPConnection connect(DSPCommandBlock block, DSPNode inNode, DSPNode? outNode = null)
    {
        outNode = outNode ?? graph.RootDSP;
        var connection = block.Connect(inNode, 0, outNode.Value, 0);
        connections.Add(connection);
        return connection;
    }

    // Create a node that streams an AudioClip through a PlayClipKernel.
    private DSPNode createPlayClipNode(DSPCommandBlock block, int channels)
    {
        var node = block.CreateDSPNode<PlayClipKernel.Parameters, PlayClipKernel.SampleProviders, PlayClipKernel>();
        // Currently input and output ports are dynamic and added via this API to a node.
        // This will change to a static definition of nodes in the future.
        block.AddOutletPort(node, channels, SoundFormat.Stereo);
        return node;
    }

    // Create a spatializer node (stereo in, stereo out). Parameters:
    //   SpatializerKernel.Parameters.Channel — which channel to delay (0/1).
    //   SpatializerKernel.Parameters.Samples — delay length in samples.
    private DSPNode createSpatializerNode(DSPCommandBlock block, int channels)
    {
        var node = block.
            CreateDSPNode<SpatializerKernel.Parameters, SpatializerKernel.SampleProviders, SpatializerKernel>();
        block.AddInletPort(node, channels, SoundFormat.Stereo);
        block.AddOutletPort(node, channels, SoundFormat.Stereo);
        return node;
    }

    // Create a lowpass filter node with the given initial cutoff (Hz).
    // The cutoff can later be changed via Filter.AudioKernel.Parameters.Cutoff.
    private DSPNode createLowpassFilterNode(DSPCommandBlock block, float cutoffHz, int channels)
    {
        var node = Filter.CreateNode(block, Filter.Type.Lowpass, channels);
        block.SetFloat<Filter.AudioKernel.Parameters, Filter.AudioKernel.Providers, Filter.AudioKernel>(
            node,
            Filter.AudioKernel.Parameters.Cutoff,
            cutoffHz
        );
        return node;
    }

    // Pump the graph so queued node events (ClipStopped) reach our handler.
    protected override void OnUpdate()
    {
        graph.Update();
    }

    protected override void OnDestroy()
    {
        // Command blocks can also be completed via the C# 'using' construct for convenience.
        using (var block = graph.CreateCommandBlock())
        {
            foreach (var connection in connections)
            {
                block.Disconnect(connection);
            }
            foreach (var node in freeNodes)
            {
                block.ReleaseDSPNode(node);
            }
            foreach (var node in playingNodes)
            {
                block.ReleaseDSPNode(node);
            }
        }
        graph.RemoveNodeEventHandler(handlerID);
        // NOTE(review): the spatializer/lowpass nodes created in getFreeNode are
        // not tracked in freeNodes/playingNodes and are not released here —
        // confirm whether disposing the output driver reclaims them.
        output.Dispose();
    }
}
}
using System;
using Unity.Audio;
using Unity.Burst;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using UnityEngine;
// From the DSPGraph 0.1.0-preview.11 samples, with modifications.
//
namespace BUD.Audio.Filters
{
// A multi-mode state-variable filter (SVF) DSP node, adapted from the DSPGraph
// 0.1.0-preview.11 samples. The filter type, cutoff, Q, and gain are runtime
// parameters; coefficients are redesigned each audio block.
public struct Filter
{
    public enum Type
    {
        Lowpass,
        Highpass,
        Bandpass,
        Bell,
        Notch,
        Lowshelf,
        Highshelf
    }

    // Use like so:
    //   lowpassNode = Filter.CreateNode(block, Filter.Type.Lowpass, 2);
    //   block.Connect(node, 0, lowpassNode, 0);
    //   block.Connect(lowpassNode, 0, graph.RootDSP, 0);
    //
    // Set parameters like so:
    //   block.SetFloat<Filter.AudioKernel.Parameters, Filter.AudioKernel.Providers,
    //       Filter.AudioKernel>(filterNode, Filter.AudioKernel.Parameters.Cutoff, 500f);
    //
    // Parameter ranges (see AudioKernel.Parameters):
    //   Cutoff: 10.0f - 22000.0f (Hz)
    //   Q: 1f - 100f
    //   GainInDBs: -80f - 0f
    public static DSPNode CreateNode(DSPCommandBlock block, Type type, int channels)
    {
        var node = block.CreateDSPNode<AudioKernel.Parameters, AudioKernel.Providers, AudioKernel>();
        block.AddInletPort(node, channels, SoundFormat.Stereo);
        block.AddOutletPort(node, channels, SoundFormat.Stereo);
        block.SetFloat<AudioKernel.Parameters, AudioKernel.Providers, AudioKernel>(
            node,
            AudioKernel.Parameters.FilterType,
            (float) type
        );
        return node;
    }

    // SVF coefficients: a1-a3 drive the integrators, m0-m2 mix the outputs,
    // A is an overall gain factor.
    struct Coefficients
    {
        public float A, g, k, a1, a2, a3, m0, m1, m2;
    }

    // All Design* helpers take the cutoff normalized by the sample rate and
    // the gain as a linear factor (not dB).
    static Coefficients DesignBell(float normalizedFrequency, float quality, float linearGain)
    {
        var A = linearGain;
        var g = Mathf.Tan(Mathf.PI * normalizedFrequency);
        var k = 1 / (quality * A);
        var a1 = 1 / (1 + g * (g + k));
        var a2 = g * a1;
        var a3 = g * a2;
        var m0 = 1;
        var m1 = k * (A * A - 1);
        var m2 = 0;
        return new Coefficients { A = A, g = g, k = k, a1 = a1, a2 = a2, a3 = a3, m0 = m0, m1 = m1, m2 = m2 };
    }

    static Coefficients DesignLowpass(float normalizedFrequency, float Q, float linearGain)
    {
        var A = linearGain;
        var g = Mathf.Tan(Mathf.PI * normalizedFrequency);
        var k = 1 / Q;
        var a1 = 1 / (1 + g * (g + k));
        var a2 = g * a1;
        var a3 = g * a2;
        var m0 = 0;
        var m1 = 0;
        var m2 = 1;
        return new Coefficients { A = A, g = g, k = k, a1 = a1, a2 = a2, a3 = a3, m0 = m0, m1 = m1, m2 = m2 };
    }

    // Bandpass/highpass reuse the lowpass integrator coefficients and only
    // change the output mix (m0-m2).
    static Coefficients DesignBandpass(float normalizedFrequency, float Q, float linearGain)
    {
        var coefficients = Design(Type.Lowpass, normalizedFrequency, Q, linearGain);
        coefficients.m1 = 1;
        coefficients.m2 = 0;
        return coefficients;
    }

    static Coefficients DesignHighpass(float normalizedFrequency, float Q, float linearGain)
    {
        var coefficients = Design(Type.Lowpass, normalizedFrequency, Q, linearGain);
        coefficients.m0 = 1;
        coefficients.m1 = -coefficients.k;
        coefficients.m2 = -1;
        return coefficients;
    }

    static Coefficients DesignNotch(float normalizedFrequency, float Q, float linearGain)
    {
        var coefficients = DesignLowpass(normalizedFrequency, Q, linearGain);
        coefficients.m0 = 1;
        coefficients.m1 = -coefficients.k;
        coefficients.m2 = 0;
        return coefficients;
    }

    static Coefficients DesignLowshelf(float normalizedFrequency, float Q, float linearGain)
    {
        var A = linearGain;
        var g = Mathf.Tan(Mathf.PI * normalizedFrequency) / Mathf.Sqrt(A);
        var k = 1 / Q;
        var a1 = 1 / (1 + g * (g + k));
        var a2 = g * a1;
        var a3 = g * a2;
        var m0 = 1;
        var m1 = k * (A - 1);
        var m2 = A * A - 1;
        return new Coefficients { A = A, g = g, k = k, a1 = a1, a2 = a2, a3 = a3, m0 = m0, m1 = m1, m2 = m2 };
    }

    static Coefficients DesignHighshelf(float normalizedFrequency, float Q, float linearGain)
    {
        var A = linearGain;
        var g = Mathf.Tan(Mathf.PI * normalizedFrequency) / Mathf.Sqrt(A);
        var k = 1 / Q;
        var a1 = 1 / (1 + g * (g + k));
        var a2 = g * a1;
        var a3 = g * a2;
        var m0 = A * A;
        var m1 = k * (1 - A) * A;
        var m2 = 1 - A * A;
        return new Coefficients { A = A, g = g, k = k, a1 = a1, a2 = a2, a3 = a3, m0 = m0, m1 = m1, m2 = m2 };
    }

    // Dispatch on filter type; frequency already normalized.
    static Coefficients Design(Type type, float normalizedFrequency, float Q, float linearGain)
    {
        switch (type)
        {
            case Type.Lowpass: return DesignLowpass(normalizedFrequency, Q, linearGain);
            case Type.Highpass: return DesignHighpass(normalizedFrequency, Q, linearGain);
            case Type.Bandpass: return DesignBandpass(normalizedFrequency, Q, linearGain);
            case Type.Bell: return DesignBell(normalizedFrequency, Q, linearGain);
            case Type.Notch: return DesignNotch(normalizedFrequency, Q, linearGain);
            case Type.Lowshelf: return DesignLowshelf(normalizedFrequency, Q, linearGain);
            case Type.Highshelf: return DesignHighshelf(normalizedFrequency, Q, linearGain);
            default:
                throw new ArgumentException("Unknown filter type", nameof(type));
        }
    }

    // Dispatch on filter type; cutoff in Hz, gain in dB (converted here).
    static Coefficients Design(Type type, float cutoff, float Q, float gainInDBs, float sampleRate)
    {
        var linearGain = Mathf.Pow(10, gainInDBs / 20);
        switch (type)
        {
            case Type.Lowpass:
                return DesignLowpass(cutoff / sampleRate, Q, linearGain);
            case Type.Highpass:
                return DesignHighpass(cutoff / sampleRate, Q, linearGain);
            case Type.Bandpass:
                return DesignBandpass(cutoff / sampleRate, Q, linearGain);
            case Type.Bell:
                return DesignBell(cutoff / sampleRate, Q, linearGain);
            case Type.Notch:
                return DesignNotch(cutoff / sampleRate, Q, linearGain);
            case Type.Lowshelf:
                return DesignLowshelf(cutoff / sampleRate, Q, linearGain);
            case Type.Highshelf:
                return DesignHighshelf(cutoff / sampleRate, Q, linearGain);
            default:
                throw new ArgumentException("Unknown filter type", nameof(type));
        }
    }

    [BurstCompile(CompileSynchronously = true)]
    public struct AudioKernel : IAudioKernel<AudioKernel.Parameters, AudioKernel.Providers>
    {
        // Per-channel filter state: the SVF's two integrator memories.
        public struct Channel
        {
            public float z1, z2;
        }

        [NativeDisableContainerSafetyRestriction]
        public NativeArray<Channel> channels;

        public enum Parameters
        {
            [ParameterDefault((float) Filter.Type.Lowpass)]
            [ParameterRange(
                (float) Filter.Type.Lowpass,
                (float) Filter.Type.Highshelf
            )]
            FilterType,
            [ParameterDefault(5000.0f)] [ParameterRange(10.0f, 22000.0f)]
            Cutoff,
            [ParameterDefault(1.0f)] [ParameterRange(1.0f, 100.0f)]
            Q,
            [ParameterDefault(0.0f)] [ParameterRange(-80.0f, 0.0f)]
            GainInDBs
        }

        public enum Providers { }

        public void Initialize()
        {
            // Two channels of filter state (stereo).
            channels = new NativeArray<Channel>(2, Allocator.AudioKernel);
        }

        public void Execute(ref ExecuteContext<Parameters, Providers> context)
        {
            var input = context.Inputs.GetSampleBuffer(0);
            var output = context.Outputs.GetSampleBuffer(0);
            var channelCount = output.Channels;
            var sampleFrames = output.Samples;
            // Total number of interleaved float values in this block.
            // (Renamed from the original's `champleCount` typo.)
            var totalSampleCount = channelCount * sampleFrames;
            var inputBuffer = input.Buffer;
            var outputBuffer = output.Buffer;
            if (channels.Length == 0)
            {
                // No state allocated: emit silence rather than garbage.
                for (var n = 0; n < totalSampleCount; n++)
                    outputBuffer[n] = 0.0f;
                return;
            }

            // Parameters are re-read and coefficients redesigned every block.
            var parameters = context.Parameters;
            var filterType = (Type) parameters.GetFloat(Parameters.FilterType, 0);
            var cutoff = parameters.GetFloat(Parameters.Cutoff, 0);
            var q = parameters.GetFloat(Parameters.Q, 0);
            var gain = parameters.GetFloat(Parameters.GainInDBs, 0);
            var coefficients = Design(filterType, cutoff, q, gain, context.SampleRate);

            // NOTE(review): state exists for 2 channels; if channelCount > 2 the
            // extra channels pass through unfiltered — confirm stereo-only usage.
            for (var c = 0; c < channels.Length; c++)
            {
                var z1 = channels[c].z1;
                var z2 = channels[c].z2;
                for (var i = 0; i < totalSampleCount; i += channelCount)
                {
                    var x = inputBuffer[i + c];
                    // One SVF step: integrator updates then output mix.
                    var v3 = x - z2;
                    var v1 = coefficients.a1 * z1 + coefficients.a2 * v3;
                    var v2 = z2 + coefficients.a2 * z1 + coefficients.a3 * v3;
                    z1 = 2 * v1 - z1;
                    z2 = 2 * v2 - z2;
                    outputBuffer[i + c] = coefficients.A *
                                          (coefficients.m0 * x + coefficients.m1 * v1 + coefficients.m2 * v2);
                }
                // Persist the integrator state for the next block.
                channels[c] = new Channel { z1 = z1, z2 = z2 };
            }
        }

        public void Dispose()
        {
            if (channels.IsCreated)
                channels.Dispose();
        }
    }
}
}
using BUD.Audio.Utils;
using BUD.Systems;
using Unity.Audio;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Burst;
namespace BUD.Audio.Kernels
{
// From the DSPGraph 0.1.0-preview.11 samples, with small modifications.
//
// The 'audio job'. This is the kernel that defines a running DSP node inside the
// DSPGraph. It is a struct that implements the IAudioKernel interface. It can contain
// internal state, and will have the Execute function called as part of the graph
// traversal during an audio frame.
//
// The 'audio job' driving clip playback: an IAudioKernel whose Execute runs
// during graph traversal each audio frame, streaming resampled clip samples
// to the node's output port while `playing` is set.
[BurstCompile(CompileSynchronously = true)]
struct PlayClipKernel : IAudioKernel<PlayClipKernel.Parameters, PlayClipKernel.SampleProviders>
{
    // One float parameter slot per enum member, set via DSPCommandBlock.SetFloat.
    public enum Parameters
    {
        Rate
    }

    // One sample-provider slot per enum member, filled via SetSampleProvider;
    // providers stream samples out of AudioClips/VideoPlayers.
    public enum SampleProviders
    {
        DefaultSlot
    }

    // Converts from the clip's sample rate to the system's output rate.
    public Resampler resampler;

    [NativeDisableContainerSafetyRestriction]
    public NativeArray<float> resampleBuffer;

    // Flipped on from the main thread through PlayClipKernelUpdate.
    public bool playing;

    public void Initialize()
    {
        // The resource context active during initialization makes buffer
        // allocation safe inside the job.
        resampleBuffer = new NativeArray<float>(1025 * 2, Allocator.AudioKernel);
        // Start at "end of buffer" so the first Execute pulls fresh data.
        resampler.position = (double) resampleBuffer.Length / 2;
    }

    public void Execute(ref ExecuteContext<Parameters, SampleProviders> context)
    {
        if (!playing)
        {
            return;
        }
        // Output buffer of the outlet port added when this node was created.
        var outputBuffer = context.Outputs.GetSampleBuffer(0);
        // Streams samples from the AudioClip assigned to DefaultSlot.
        var clipProvider = context.Providers.GetSampleProvider(SampleProviders.DefaultSlot);
        // The resampler returns true once the provider is fully consumed.
        var clipFinished = resampler.ResampleLerpRead(
            clipProvider,
            resampleBuffer,
            outputBuffer.Buffer,
            context.Parameters,
            Parameters.Rate
        );
        if (!clipFinished)
        {
            return;
        }
        // Tell the main thread the clip stopped so the node can be recycled.
        context.PostEvent(new AudioSystem.ClipStopped());
        playing = false;
    }

    public void Dispose()
    {
        if (resampleBuffer.IsCreated)
        {
            resampleBuffer.Dispose();
        }
    }
}
}
using Unity.Audio;
using Unity.Burst;
namespace BUD.Audio.Kernels
{
// From the DSPGraph 0.1.0-preview.11 samples, with small modifications.
// Update job that kicks off playback of a PlayClipKernel node: it runs on the
// mixer thread (scheduled via DSPCommandBlock.UpdateAudioKernel) and simply
// flips the kernel into its playing state.
[BurstCompile(CompileSynchronously = true)]
struct PlayClipKernelUpdate : IAudioKernelUpdate<PlayClipKernel.Parameters, PlayClipKernel.SampleProviders, PlayClipKernel>
{
    public void Update(ref PlayClipKernel audioKernel) => audioKernel.playing = true;
}
}
using System;
using Unity.Audio;
using Unity.Burst;
using Unity.Collections;
// From the DSPGraph 0.1.0-preview.11 samples, with small modifications.
namespace BUD.Audio.Utils
{
// From the DSPGraph 0.1.0-preview.11 samples, with small modifications.
// Linearly-interpolating resampler: streams frames from a SampleProvider
// through an intermediate interleaved-stereo buffer at a per-frame rate.
[BurstCompile(CompileSynchronously = true)]
public struct Resampler
{
// Fractional read position (in frames) into the input buffer, carried across
// blocks so resampling stays continuous. The kernel initializes it to
// "end of buffer" to force a provider read on the first call.
public double position;
// Fill `output` (output.Length / 2 stereo frames) with interpolated samples
// pulled from `provider` via the scratch buffer `input`. The playback rate is
// read per output frame from parameterData[rateParam]. Returns true once the
// provider has been exhausted.
public bool ResampleLerpRead<T>(
SampleProvider provider,
NativeArray<float> input,
NativeArray<float> output,
ParameterData<T> parameterData,
T rateParam)
where T : unmanaged, Enum
{
var finishedSampleProvider = false;
for (var i = 0; i < output.Length / 2; i++)
{
var rate = parameterData.GetFloat(rateParam, i);
position += rate;
// Usable frames in `input`, minus one so nextSampleIndex stays in range.
var length = input.Length / 2 - 1;
// Refill the scratch buffer until the read position falls inside it.
while (position >= length)
{
// Carry the last frame over as the new first frame so interpolation
// can bridge two consecutive reads.
input[0] = input[input.Length - 2];
input[1] = input[input.Length - 1];
finishedSampleProvider |= ReadSamples(provider, new NativeSlice<float>(input, 2));
position -= input.Length / 2 - 1;
}
var positionFloor = Math.Floor(position);
var positionFraction = position - positionFloor;
var previousSampleIndex = (int)positionFloor;
var nextSampleIndex = previousSampleIndex + 1;
var prevSampleL = input[previousSampleIndex * 2 + 0];
var prevSampleR = input[previousSampleIndex * 2 + 1];
var sampleL = input[nextSampleIndex * 2 + 0];
var sampleR = input[nextSampleIndex * 2 + 1];
// Linear interpolation between the two neighbouring frames.
output[i * 2 + 0] = (float)(prevSampleL + (sampleL - prevSampleL) * positionFraction);
output[i * 2 + 1] = (float)(prevSampleR + (sampleR - prevSampleR) * positionFraction);
}
return finishedSampleProvider;
}
// read either mono or stereo, always convert to stereo interleaved
// Returns true when the provider is exhausted (remainder is zero-padded).
static bool ReadSamples(SampleProvider provider, NativeSlice<float> destination)
{
if (!provider.Valid)
return true;
var finished = false;
// Read from SampleProvider and convert to interleaved stereo if needed
if (provider.ChannelCount == 2)
{
// NOTE(review): `read` appears to be a frame count while destination.Length
// is a float count — confirm against the SampleProvider.Read contract.
var read = provider.Read(destination.Slice(0, destination.Length));
if (read < destination.Length / 2)
{
// Clip ran out mid-read: zero-pad the rest of the buffer.
for (var i = read * 2; i < destination.Length; i++)
destination[i] = 0;
return true;
}
}
else
{
// Mono: read n frames into the first half, then spread them to
// interleaved stereo in place.
var n = destination.Length / 2;
var buffer = destination.Slice(0, n);
var read = provider.Read(buffer);
if (read < n)
{
for (var i = read; i < n; i++)
destination[i] = 0;
finished = true;
}
// Walk backwards so mono samples are not overwritten before duplication.
for (var i = n - 1; i >= 0; i--)
{
destination[i * 2 + 0] = destination[i];
destination[i * 2 + 1] = destination[i];
}
}
return finished;
}
}
}
using Unity.Burst;
using Unity.Collections;
namespace BUD.Audio.Utils
{
// The "spatializer" can apply a delay to a channel by a number of samples, so that a sound appears to be coming
// from the other side.
// Always is stereo.
// The "spatializer" can apply a delay to a channel by a number of samples, so
// that a sound appears to be coming from the other side. Always is stereo
// (interleaved L/R).
[BurstCompile(CompileSynchronously = true)]
public struct Spatializer
{
    // Interleaved-stereo index (0 or 1) of the channel being delayed.
    public byte delayedChannel;
    public int delayInSamples;

    // Delay the configured channel of interleaved stereo `input` by
    // delayInSamples frames, writing into `output`. `delayBuffer` carries the
    // delayed channel's unplayed tail across blocks: its first delayInSamples
    // entries are consumed at the start of a block and refilled at the end.
    public void Delay(
        NativeArray<float> input,
        NativeArray<float> output,
        NativeArray<float> delayBuffer)
    {
        var delay = delayInSamples;
        var delayed = delayedChannel;
        var normal = 1 - delayedChannel;
        var frames = output.Length / 2;
        // First, play the tail saved from the previous block on the delayed channel.
        var i = 0;
        for (; i < delay; i++)
        {
            output[i * 2 + delayed] = delayBuffer[i]; // Can be empty at the very start.
            output[i * 2 + normal] = input[i * 2 + normal];
        }
        // Then play the current input, shifted by `delay` frames on the delayed channel.
        for (; i < frames; i++)
        {
            output[i * 2 + delayed] = input[(i - delay) * 2 + delayed];
            output[i * 2 + normal] = input[i * 2 + normal];
        }
        // Finally, save the unconsumed last `delay` frames of the delayed channel
        // for the next block. BUG FIX: the original wrote to delayBuffer indices
        // [frames - delay, frames) and sourced frames offset by an extra `delay`
        // (input[(i - delay) * 2 + delayed] with i starting at frames - delay),
        // while the read loop above consumes delayBuffer[0 .. delay) — so the
        // next block replayed stale/wrong samples.
        for (var j = 0; j < delay; j++)
        {
            delayBuffer[j] = input[(frames - delay + j) * 2 + delayed];
        }
    }
}
}
using BUD.Audio.Utils;
using Unity.Audio;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Burst;
using Unity.Mathematics;
namespace BUD.Audio.Kernels
{
// The kernel for a spatializer node.
//
// The kernel for a spatializer node: each audio frame it reads the Channel and
// Samples parameters and lets the Spatializer delay one stereo channel so the
// sound appears to come from one side.
[BurstCompile(CompileSynchronously = true)]
struct SpatializerKernel : IAudioKernel<SpatializerKernel.Parameters, SpatializerKernel.SampleProviders>
{
    public enum Parameters
    {
        Channel,
        Samples,
    }

    // NOTE(review): Left maps to interleaved index 1 and Right to index 0 —
    // confirm this matches the channel order produced upstream.
    public enum Channels
    {
        Left = 1,
        Right = 0,
    }

    public enum SampleProviders
    {
        DefaultSlot
    }

    private const int MAX_DELAY = 1025;

    [NativeDisableContainerSafetyRestriction]
    private NativeArray<float> delayBuffer;

    private Spatializer spatializer;

    public void Initialize()
    {
        // Buffer allocation is safe here thanks to the job's resource context.
        delayBuffer = new NativeArray<float>(MAX_DELAY * 2, Allocator.AudioKernel);
        // The Spatializer struct does the actual delaying work.
        spatializer = new Spatializer();
    }

    public void Execute(ref ExecuteContext<Parameters, SampleProviders> context)
    {
        // Buffers of the node's single inlet and outlet ports.
        var input = context.Inputs.GetSampleBuffer(0);
        var output = context.Outputs.GetSampleBuffer(0);
        // Clamp the requested delay so it never exceeds the buffer capacity.
        var requestedDelay = (int) context.Parameters.GetFloat(Parameters.Samples, 0);
        spatializer.delayInSamples = math.min(requestedDelay, MAX_DELAY);
        spatializer.delayedChannel = (byte) context.Parameters.GetFloat(Parameters.Channel, 0);
        spatializer.Delay(input.Buffer, output.Buffer, delayBuffer);
    }

    public void Dispose()
    {
        if (delayBuffer.IsCreated)
        {
            delayBuffer.Dispose();
        }
    }
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment