@kettle11
Created March 7, 2020 22:00
Audio callback with attenuation
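For context, here is a minimal sketch of the supporting types the callback below appears to rely on, reconstructed from the fields and enum variants it uses. The exact definitions are assumptions, not the gist author's code: Vec3 and Quat stand in for whatever vector math types the project actually uses (Vec3 would also need subtraction and a length() method for the callback to compile, omitted here), and the message channel is guessed to be a std::sync::mpsc channel because the callback calls try_recv. The gist's callback follows after this sketch.

// Sketch of assumed supporting types (not the original definitions).
#[derive(Clone, Copy)]
struct Vec3 { x: f32, y: f32, z: f32 }

#[derive(Clone, Copy)]
struct Quat { x: f32, y: f32, z: f32, w: f32 }

enum AudioMessage {
    NewAudioClip { id: usize, audio_clip: AudioClip },
    PlayAudioClip { id: usize, offset: usize, looping: bool, position: Vec3 },
    MoveListener { position: Vec3, rotation: Quat },
    MoveInstance { id: usize, position: Vec3 },
}

struct AudioClip {
    samples: Vec<f32>, // Decoded sample data.
    channels: u16,     // Used as the stride when advancing through `samples`.
}

struct AudioClipInstance {
    audio_clip_id: usize,
    current_offset: f32, // Fractional read position into the clip's samples.
    looping: bool,
    volume: f32,
    position: Vec3,
    radius: f32, // Distance within which the clip plays at full volume.
    tempo: f32,
    old_position: Vec3,
    old_tempo: f32,
}

struct Listener {
    position: Vec3,
    rotation: Quat,
}

struct AudioThread {
    audio_clips: Vec<AudioClip>,
    playing_clips: Vec<AudioClipInstance>,
    listener: Listener,
    samples_per_second: u32,
    current_sample: usize,
    current_audio_clip: usize,
    audio_thread_receive: std::sync::mpsc::Receiver<AudioMessage>,
}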
impl AudioCallback for AudioThread {
    type Channel = f32;

    // All the mixing and primary audio logic is here.
    fn callback(&mut self, out: &mut [Self::Channel]) {
        let old_position = self.listener.position;
        // let old_rotation = self.listener.rotation;

        // Store previous position and tempo for interpolation.
        for instance in self.playing_clips.iter_mut() {
            instance.old_position = instance.position;
            instance.old_tempo = instance.tempo;
        }
        // Check messages from the main thread first.
        while let Ok(message) = self.audio_thread_receive.try_recv() {
            match message {
                AudioMessage::NewAudioClip { id, audio_clip } => {
                    println!("NEW AUDIO CLIP: {:?}", id);
                    self.audio_clips.push(audio_clip);
                }
                AudioMessage::PlayAudioClip {
                    id,
                    offset,
                    looping,
                    position,
                } => {
                    self.current_sample = offset;
                    self.current_audio_clip = id;
                    self.playing_clips.push(AudioClipInstance {
                        audio_clip_id: id,
                        current_offset: offset as f32,
                        looping,
                        volume: 0.8,
                        position,
                        radius: 0.5, // Default radius is half a meter.
                        tempo: 1.0,
                        old_position: position,
                        old_tempo: 1.0,
                    })
                }
                AudioMessage::MoveListener { position, rotation } => {
                    self.listener.position = position;
                    self.listener.rotation = rotation;
                }
                AudioMessage::MoveInstance { id, position } => {
                    self.playing_clips[id].position = position;
                }
            }
        }
        // First fill the output buffer with 0s.
        for dst in out.iter_mut() {
            *dst = 0.0;
        }

        // https://ccrma.stanford.edu/~jos/pasp/Converting_Propagation_Distance_Delay.html
        let speed_of_sound_meters_per_second = 345.0; // Speed of sound in air at 22 Celsius and 1 atmosphere.
        let distance_per_sample = speed_of_sound_meters_per_second / self.samples_per_second as f32;
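        // For example, at a 44.1 kHz output rate (an assumption; `samples_per_second` comes from
        // the audio device), distance_per_sample is 345.0 / 44100.0 ≈ 0.0078 meters, so roughly
        // 128 samples of delay accumulate per meter between the source and the listener.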
        // Mix audio clips.
        for instance in self.playing_clips.iter_mut() {
            let clip = &self.audio_clips[instance.audio_clip_id];
            let clip_len = clip.samples.len();
            let sample_stride = clip.channels as usize; // Assuming mono channel output for now.

            // Inverse-distance attenuation is used.
            // Spherical waves attenuate this way because their surface area grows with distance,
            // spreading the wave's energy over a larger surface.
            // Note that the inverse square law is intentionally not used, as it is incorrect:
            // https://ccrma.stanford.edu/~jos/pasp/Spherical_Waves_Point_Source.html
            let listener_distance = (self.listener.position - instance.position).length();
            let r = instance.radius / listener_distance;
            let current_attenuation = f32::min(r, 1.0);
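            // For example, with the default radius of 0.5 meters, a listener 2.0 meters away
            // hears the clip at 0.5 / 2.0 = 0.25 of its volume; inside the radius the ratio
            // exceeds 1.0 and is clamped so the clip is never amplified.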
            // The old attenuation is calculated the same way from the previous positions.
            let old_listener_distance = (old_position - instance.old_position).length();
            let r = instance.radius / old_listener_distance;
            let old_attenuation = f32::min(r, 1.0);

            // The attenuation is linearly interpolated from old to new across the buffer.
            let attenuation_difference = current_attenuation - old_attenuation;
            let mut t = 0.0;
            let t_step = 1.0 / out.len() as f32;

            // By sampling based on travel time, the doppler effect and sound propagation delay are achieved.
            // The effect is imperfect because all sound is treated as traveling from the sound's
            // current location, but it seems to work reasonably well for simple scenarios.
            let start_sample =
                instance.current_offset - old_listener_distance / distance_per_sample;
            let end_sample = instance.current_offset + out.len() as f32
                - listener_distance / distance_per_sample;
            let sample_difference = end_sample - start_sample;
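            // As a worked example (assuming a 512-sample buffer at 44.1 kHz): if the listener moved
            // from 10.0 m to 9.0 m away since the last callback, the read window spans roughly
            // 512 + (10.0 - 9.0) / 0.0078 ≈ 640 source samples played over 512 output samples,
            // pitching the sound up by about 1.25x, which is the doppler shift.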
            for dst in out.iter_mut() {
                let current_sample = start_sample + sample_difference * t;

                // This bounds check should be removed from this loop.
                if instance.current_offset >= clip_len as f32 {
                    if instance.looping {
                        instance.current_offset -= clip_len as f32;
                    } else {
                        // The clip instance should be removed instead of being left around.
                        break;
                    }
                }

                fn linear_interpolation_read(samples: &[f32], offset: f32) -> f32 {
                    if offset < 0.0 {
                        return 0.0;
                    }
                    let mut offset = offset;
                    if offset >= samples.len() as f32 {
                        offset -= samples.len() as f32;
                    }
                    let i0 = offset as usize;
                    let i1 = if i0 == samples.len() - 1 { 0 } else { i0 + 1 };
                    let d = offset - (i0 as f32);
                    let v0 = samples[i0];
                    let v1 = samples[i1];
                    ((v1 - v0) * d) + v0
                }
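                // For example, an offset of 3.25 returns 75% of samples[3] plus 25% of samples[4].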
                let v = linear_interpolation_read(&clip.samples, current_sample);
                let v = v * instance.volume * (attenuation_difference * t + old_attenuation);
                *dst += v;

                instance.current_offset += sample_stride as f32 * instance.tempo;
                t += t_step;
            }
        }
    }
}
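For reference, a rough sketch of what the sending side on the main thread might look like, using the stand-in types sketched above and a std::sync::mpsc channel. The gist only shows the receiving end, so the channel type and the names here (audio_thread_send, example_main_thread_usage) are assumptions, not the author's API.

use std::sync::mpsc::Sender;

// Hypothetical main-thread helper: registers a clip, starts it looping at the origin,
// and keeps the listener in sync with the camera. Names and types are assumptions.
fn example_main_thread_usage(audio_thread_send: &Sender<AudioMessage>) {
    // Register a decoded clip with the audio thread (one second of silence as a placeholder).
    audio_thread_send
        .send(AudioMessage::NewAudioClip {
            id: 0,
            audio_clip: AudioClip { samples: vec![0.0; 44100], channels: 1 },
        })
        .unwrap();

    // Start the clip looping at the origin.
    audio_thread_send
        .send(AudioMessage::PlayAudioClip {
            id: 0,
            offset: 0,
            looping: true,
            position: Vec3 { x: 0.0, y: 0.0, z: 0.0 },
        })
        .unwrap();

    // Each frame, update the audio thread's view of the listener.
    audio_thread_send
        .send(AudioMessage::MoveListener {
            position: Vec3 { x: 1.0, y: 0.0, z: 2.0 },
            rotation: Quat { x: 0.0, y: 0.0, z: 0.0, w: 1.0 }, // Identity rotation.
        })
        .unwrap();
}

A real-time mixer would more likely use a preallocated lock-free queue than std::sync::mpsc, but the non-blocking try_recv pattern in the callback is the same idea.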