An FM Synthesizer in Swift using AVAudioEngine
import AVFoundation
import Foundation
// The single FM synthesizer instance.
private let gFMSynthesizer: FMSynthesizer = FMSynthesizer()
public class FMSynthesizer {

    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    private let kInFlightAudioBuffers: Int = 2

    // The number of audio samples per buffer. A lower value reduces latency for
    // changes, but requires more frequent rendering and increases the risk of being
    // unable to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    private let kSamplesPerBuffer: AVAudioFrameCount = 1024

    // The audio engine manages the sound system.
    private let audioEngine: AVAudioEngine = AVAudioEngine()

    // The player node schedules the playback of the audio buffers.
    private let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()

    // Use standard non-interleaved PCM audio.
    private let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)

    // A circular queue of audio buffers.
    private let audioBuffers: [AVAudioPCMBuffer]

    // The index of the next buffer to fill.
    private var bufferIndex: Int = 0

    // The dispatch queue to render audio samples.
    private let audioQueue: dispatch_queue_t = dispatch_queue_create("FMSynthesizerQueue", DISPATCH_QUEUE_SERIAL)

    // A semaphore to gate the number of buffers processed.
    private let audioSemaphore: dispatch_semaphore_t

    public class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }

    private init() {
        // Initialize the semaphore with one slot per in-flight buffer.
        audioSemaphore = dispatch_semaphore_create(kInFlightAudioBuffers)

        // Create a pool of audio buffers.
        audioBuffers = [AVAudioPCMBuffer](count: kInFlightAudioBuffers, repeatedValue: AVAudioPCMBuffer(PCMFormat: audioFormat, frameCapacity: UInt32(kSamplesPerBuffer)))

        // Attach and connect the player node.
        audioEngine.attachNode(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)

        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }

        NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: AVAudioEngineConfigurationChangeNotification, object: audioEngine)
    }

    public func play(carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * M_PI / audioFormat.sampleRate)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        dispatch_async(audioQueue) {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                dispatch_semaphore_wait(self.audioSemaphore, DISPATCH_TIME_FOREVER)

                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData[0]
                let rightChannel = audioBuffer.floatChannelData[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel[sampleIndex] = sample
                    rightChannel[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer

                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    dispatch_semaphore_signal(self.audioSemaphore)
                    return
                }

                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }

        playerNode.pan = 0.8
        playerNode.play()
    }

    @objc private func audioEngineConfigurationChange(notification: NSNotification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }
}
// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)
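A quick sanity check on the constants used above (illustrative numbers only): unitVelocity converts a frequency in hertz into a per-sample phase increment in radians, and the "about 23ms" figure is simply 1024 samples divided by the 44.1 kHz sample rate.

import Foundation

let sampleRate = 44_100.0
let samplesPerBuffer = 1_024.0

// Each buffer holds samplesPerBuffer / sampleRate seconds of audio.
let bufferDuration = samplesPerBuffer / sampleRate        // ≈ 0.0232 s, i.e. about 23 ms

// unitVelocity turns hertz into radians advanced per sample.
let unitVelocity = 2.0 * Double.pi / sampleRate
let carrierVelocity = 440.0 * unitVelocity                // ≈ 0.0627 rad per sample for the bell example
let modulatorVelocity = 679.0 * unitVelocity

// One FM sample at sample index t: sin(carrier phase + modulator amplitude * sin(modulator phase)).
let t = 100.0
let sample = sin(carrierVelocity * t + 0.8 * sin(modulatorVelocity * t))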

ghost commented Nov 23, 2016

Do you have an updated version of this for Swift 3?


bj97301 commented Jan 9, 2017

Eh? Swift 3 version?

I am a Swift noob at the moment, so all those errors look pretty scary right now.


bj97301 commented Jan 9, 2017

I got it working, but it's probably not pretty and has warnings:

import AVFoundation
import Foundation



// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {
    
    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    var kInFlightAudioBuffers: Int = 2
    
    // The number of audio samples per buffer. A lower value reduces latency for
    // changes but requires more processing but increases the risk of being unable
    // to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    let kSamplesPerBuffer: AVAudioFrameCount = 1024
    
    // The audio engine manages the sound system.
    let audioEngine: AVAudioEngine = AVAudioEngine()
    
    // The player node schedules the playback of the audio buffers.
    let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()
    
    // Use standard non-interleaved PCM audio.
    let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 1)
    
    // A circular queue of audio buffers.
    var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()
    
    // The index of the next buffer to fill.
    var bufferIndex: Int = 0
    
    // The dispatch queue to render audio samples.
    let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])
    
    // A semaphore to gate the number of buffers processed.
    let audioSemaphore: DispatchSemaphore
    
    class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }
    
    public init() {
        // init the semaphore
        audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
        
        // Create a pool of audio buffers.
        audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(kSamplesPerBuffer)), count: 2)
        
        // Attach and connect the player node.
        audioEngine.attach(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
        
        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }
        
        NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
    }
    
    func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * M_PI / audioFormat.sampleRate)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        audioQueue.async {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
                
                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData?[0]
                let rightChannel = audioBuffer.floatChannelData?[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel?[sampleIndex] = sample
                    rightChannel?[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer
                
                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    self.audioSemaphore.signal()
                    return
                }
                
                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }
        
        playerNode.pan = 0.8
        playerNode.play()
    }
    
    @objc  func audioEngineConfigurationChange(_ notification: Notification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }    
}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)


justi commented Jan 23, 2017

Should be:
let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)
The render loop writes to both floatChannelData[0] and floatChannelData[1], so the format needs two channels, not one.
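Alternatively, the render loop can be made channel-count-agnostic so a mono format doesn't silently write past the available channels. A sketch reusing the gist's variable names (not a tested drop-in change):

// Write the same sample to every channel the buffer actually has,
// instead of assuming exactly two.
let channelCount = Int(audioBuffer.format.channelCount)
for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
    for channel in 0 ..< channelCount {
        audioBuffer.floatChannelData?[channel][sampleIndex] = sample
    }
    sampleTime += 1.0
}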

@rodydavis

Swift 4:

import AVFoundation
import Foundation

// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {

// The maximum number of audio buffers in flight. Setting to two allows one
// buffer to be played while the next is being written.
var kInFlightAudioBuffers: Int = 2

// The number of audio samples per buffer. A lower value reduces latency for
// changes but requires more processing but increases the risk of being unable
// to fill the buffers in time. A setting of 1024 represents about 23ms of
// samples.
let kSamplesPerBuffer: AVAudioFrameCount = 1024

// The audio engine manages the sound system.
let audioEngine: AVAudioEngine = AVAudioEngine()

// The player node schedules the playback of the audio buffers.
let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()

// Use standard non-interleaved PCM audio.
let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 1)

// A circular queue of audio buffers.
var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()

// The index of the next buffer to fill.
var bufferIndex: Int = 0

// The dispatch queue to render audio samples.
let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])

// A semaphore to gate the number of buffers processed.
let audioSemaphore: DispatchSemaphore

class func sharedSynth() -> FMSynthesizer {
    return gFMSynthesizer
}

public init() {
    // init the semaphore
    audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
    
    // Create a pool of audio buffers.
    audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: UInt32(kSamplesPerBuffer))!, count: 2)
    
    // Attach and connect the player node.
    audioEngine.attach(playerNode)
    audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
    
    do {
        try audioEngine.start()
    } catch {
        print("AudioEngine didn't start")
    }
    
    NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
}

func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
    let unitVelocity = Float32(2.0 * M_PI / (audioFormat?.sampleRate)!)
    let carrierVelocity = carrierFrequency * unitVelocity
    let modulatorVelocity = modulatorFrequency * unitVelocity
    audioQueue.async {
        var sampleTime: Float32 = 0
        while true {
            // Wait for a buffer to become available.
            self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
            
            // Fill the buffer with new samples.
            let audioBuffer = self.audioBuffers[self.bufferIndex]
            let leftChannel = audioBuffer.floatChannelData?[0]
            let rightChannel = audioBuffer.floatChannelData?[1]
            for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                leftChannel?[sampleIndex] = sample
                rightChannel?[sampleIndex] = sample
                sampleTime = sampleTime + 1.0
            }
            audioBuffer.frameLength = self.kSamplesPerBuffer
            
            // Schedule the buffer for playback and release it for reuse after
            // playback has finished.
            self.playerNode.scheduleBuffer(audioBuffer) {
                self.audioSemaphore.signal()
                return
            }
            
            self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
        }
    }
    
    playerNode.pan = 0.8
    playerNode.play()
}

@objc  func audioEngineConfigurationChange(_ notification: Notification) -> Void {
    NSLog("Audio engine configuration change: \(notification)")
}

}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)


PadraigK commented Feb 23, 2018

This line:

// Create a pool of audio buffers.
audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat!, 
                                                              frameCapacity: UInt32(kSamplesPerBuffer))!, 
                                  count: 2)

is not doing what we hope here. It creates a single AVAudioPCMBuffer and references it twice in the array instead of creating two separate buffers.

The following code will do what we expect.

var audioBuffers = [AVAudioPCMBuffer]()
for _ in 0..<kInFlightAudioBuffers {
  audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                       frameCapacity: kSamplesPerBuffer)!)

}
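A quick way to see the aliasing directly (illustrative snippet, e.g. in a playground):

import AVFoundation

// AVAudioPCMBuffer is a class, so Array(repeating:count:) stores the same instance twice.
let format = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)!
let shared = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: format, frameCapacity: 1024)!, count: 2)
print(shared[0] === shared[1])       // true: one buffer referenced twice

let separate = (0..<2).map { _ in AVAudioPCMBuffer(pcmFormat: format, frameCapacity: 1024)! }
print(separate[0] === separate[1])   // false: two distinct buffers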

@DeclanMaybury

The var at the start creates a local variable instead of modifying the existing audioBuffers array.

audioBuffers = [AVAudioPCMBuffer]()
for _ in 0..<kInFlightAudioBuffers {
    audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                         frameCapacity: kSamplesPerBuffer)!)
}

Probably took me longer than it should have to realise why it wasn't working, haha
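In isolation, the pitfall looks like this (toy example, nothing to do with audio):

class Example {
    var items: [Int] = []

    func wrong() {
        var items = [Int]()   // declares a new local that shadows self.items
        items.append(1)       // self.items is still empty afterwards
    }

    func right() {
        items = [Int]()       // assigns the stored property
        items.append(1)       // self.items now contains one element
    }
}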


carloshs94 commented Apr 19, 2020

Hi guys, any clue why the audio "cracks" when I power off the screen?
I've got the Audio background mode active and I do set up the session.
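For reference (and not a diagnosis of the crackling), "setting up the session" for background playback on iOS usually amounts to something like:

import AVFoundation

// Typical session configuration for background playback; requires the
// Audio background mode capability on the target.
do {
    try AVAudioSession.sharedInstance().setCategory(.playback, mode: .default, options: [])
    try AVAudioSession.sharedInstance().setActive(true)
} catch {
    print("Failed to configure AVAudioSession: \(error)")
}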


ghost commented Apr 23, 2020

hmm, I get stuttering audio when I try to do this in a playground, any idea why?


fcesc commented Oct 13, 2022

This works in Swift 5 but I get this warning on the line:
self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)

"Thread running at QOS_CLASS_USER_INITIATED waiting on a lower QoS thread running at QOS_CLASS_DEFAULT. Investigate ways to avoid priority inversions"

/////////////////////

import AVFoundation
import Foundation

// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {

// The maximum number of audio buffers in flight. Setting to two allows one
// buffer to be played while the next is being written.
var kInFlightAudioBuffers: Int = 2

// The number of audio samples per buffer. A lower value reduces latency for
// changes but requires more processing but increases the risk of being unable
// to fill the buffers in time. A setting of 1024 represents about 23ms of
// samples.
let kSamplesPerBuffer: AVAudioFrameCount = 1024

// The audio engine manages the sound system.
let audioEngine: AVAudioEngine = AVAudioEngine()

// The player node schedules the playback of the audio buffers.
let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()

// Use standard non-interleaved PCM audio.
let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)

// A circular queue of audio buffers.
var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()

// The index of the next buffer to fill.
var bufferIndex: Int = 0

// The dispatch queue to render audio samples.
let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])

// A semaphore to gate the number of buffers processed.
let audioSemaphore: DispatchSemaphore

class func sharedSynth() -> FMSynthesizer {
    return gFMSynthesizer
}



public init() {
    // init the semaphore
    audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
            
    audioBuffers = [AVAudioPCMBuffer]()
    for _ in 0..<kInFlightAudioBuffers {
      audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: kSamplesPerBuffer)!)
    }
    
    
    // Attach and connect the player node.
    audioEngine.attach(playerNode)
    audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
    
    do {
        try audioEngine.start()
    } catch {
        print("AudioEngine didn't start")
    }
    
    NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
}



func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
    let unitVelocity = Float32(2.0 * Double.pi / (audioFormat?.sampleRate)!)
    let carrierVelocity = carrierFrequency * unitVelocity
    let modulatorVelocity = modulatorFrequency * unitVelocity
    
    audioQueue.async {
        var sampleTime: Float32 = 0
        while true {
            // Wait for a buffer to become available.
            self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
            
            // Fill the buffer with new samples.
            let audioBuffer = self.audioBuffers[self.bufferIndex]
            let leftChannel = audioBuffer.floatChannelData?[0]
            let rightChannel = audioBuffer.floatChannelData?[1]
            for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                leftChannel?[sampleIndex] = sample
                rightChannel?[sampleIndex] = sample
                sampleTime = sampleTime + 1.0
            }
            audioBuffer.frameLength = self.kSamplesPerBuffer
            
            // Schedule the buffer for playback and release it for reuse after
            // playback has finished.
            self.playerNode.scheduleBuffer(audioBuffer) {
                self.audioSemaphore.signal()
                return
            }
            
            self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
        }
    }
    
    playerNode.pan = 0.8
    playerNode.play()
}



@objc  func audioEngineConfigurationChange(_ notification: Notification) -> Void {
    NSLog("Audio engine configuration change: \(notification)")
}

}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)


ThruralDev commented Jan 4, 2024

@fcesc Thank you so much, this approach works fine for me and I can play a continuous sound with that code.
Can you tell me how I can play a sequence of AVAudioPCMBuffers with that approach, please? Each AVAudioPCMBuffer also has a size of 1024.
