Slight modification of jsbain's code (@JonB on the Pythonista forum) to generate a sawtooth instead of a sine and to add a filter. The first block of constants corresponds to the coreaudioconstants module imported by the main script below.
kAudioObjectSystemObject = 1
kAudioHardwarePropertyDevices = int.from_bytes(b'dev#', byteorder='big')
kAudioHardwarePropertyDefaultInputDevice = int.from_bytes(b'dIn ', byteorder='big')
kAudioHardwarePropertyDefaultOutputDevice = int.from_bytes(b'dOut', byteorder='big')
kAudioObjectPropertyScopeGlobal = int.from_bytes(b'glob', byteorder='big')
kAudioObjectPropertyScopeInput = int.from_bytes(b'inpt', byteorder='big')
kAudioObjectPropertyScopeOutput = int.from_bytes(b'outp', byteorder='big')
kAudioObjectPropertyScopePlayThrough = int.from_bytes(b'ptru', byteorder='big')
kAudioObjectPropertyName = int.from_bytes(b'lnam', byteorder='big')
kAudioObjectPropertyModelName = int.from_bytes(b'lmod', byteorder='big')
kAudioObjectPropertyManufacturer = int.from_bytes(b'lmak', byteorder='big')
kAudioDevicePropertyNominalSampleRate = int.from_bytes(b'nsrt', byteorder='big')
kAudioDevicePropertyBufferFrameSize = int.from_bytes(b'fsiz', byteorder='big')
kAudioDevicePropertyBufferFrameSizeRange = int.from_bytes(b'fsz#', byteorder='big')
kAudioDevicePropertyUsesVariableBufferFrameSizes = int.from_bytes(b'vfsz', byteorder='big')
kAudioDevicePropertyStreamConfiguration = int.from_bytes(b'slay', byteorder='big')
kCFStringEncodingUTF8 = 0x08000100
kAudioObjectPropertyElementMaster = 0
kAudioUnitType_Output = int.from_bytes(b'auou', byteorder='big')
kAudioUnitManufacturer_Apple = int.from_bytes(b'appl', byteorder='big')
kAudioUnitSubType_GenericOutput = int.from_bytes(b'genr', byteorder='big')
kAudioUnitSubType_HALOutput = int.from_bytes(b'ahal', byteorder='big')
kAudioUnitSubType_DefaultOutput = int.from_bytes(b'def ', byteorder='big')
kAudioUnitSubType_RemoteIO = int.from_bytes(b'rioc', byteorder='big')
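# Note on the values above: Core Audio identifies component types, subtypes and
# property selectors by four-character codes ("FourCC"). Packing the four ASCII
# bytes big-endian reproduces the UInt32 the C headers define, e.g.
#     int.from_bytes(b'lpcm', byteorder='big') == 0x6C70636D
# which is the value the headers give for linear PCM ('lpcm').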
# The audio unit can do input from the device as well as output to the
# device. Bus 0 is used for the output side, bus 1 is used to get audio
# input from the device.
outputbus = 0
inputbus = 1
def error_number_to_string(num):
    if num == kAudioUnitErr_InvalidProperty:
        return "The property is not supported"
    elif num == kAudioUnitErr_InvalidParameter:
        return "The parameter is not supported"
    elif num == kAudioUnitErr_InvalidElement:
        return "The specified element is not valid"
    elif num == kAudioUnitErr_NoConnection:
        return "There is no connection (generally an audio unit is asked to render but it has" \
               " no input from which to gather data)"
    elif num == kAudioUnitErr_FailedInitialization:
        return "The audio unit is unable to be initialized"
    elif num == kAudioUnitErr_TooManyFramesToProcess:
        return "When an audio unit is initialized it has a value which specifies the max" \
               " number of frames it will be asked to render at any given time. If an audio" \
               " unit is asked to render more than this, this error is returned."
    elif num == kAudioUnitErr_InvalidFile:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file is invalid (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_UnknownFileType:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file is of an unknown type (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_FileNotSpecified:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file hasn't been set on it (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_FormatNotSupported:
        return "Returned if an input or output format is not supported"
    elif num == kAudioUnitErr_Uninitialized:
        return "Returned if an operation requires an audio unit to be initialized and it is not."
    elif num == kAudioUnitErr_InvalidScope:
        return "The specified scope is invalid"
    elif num == kAudioUnitErr_PropertyNotWritable:
        return "The property cannot be written"
    elif num == kAudioUnitErr_CannotDoInCurrentContext:
        return "Returned when an audio unit is in a state where it can't perform the requested" \
               " action now - but it could later. It's usually used to guard a render operation" \
               " when a reconfiguration of its internal state is being performed."
    elif num == kAudioUnitErr_InvalidPropertyValue:
        return "The property is valid, but the value of the property being provided is not"
    elif num == kAudioUnitErr_PropertyNotInUse:
        return "Returned when a property is valid, but it hasn't been set to a valid value at this time."
    elif num == kAudioUnitErr_Initialized:
        return "Indicates the operation cannot be performed because the audio unit is initialized."
    elif num == kAudioUnitErr_InvalidOfflineRender:
        return "Used to indicate that the offline render operation is invalid. For instance," \
               " when the audio unit needs to be pre-flighted, but it hasn't been."
    elif num == kAudioUnitErr_Unauthorized:
        return "Returned by either Open or Initialize, this error is used to indicate that the" \
               " audio unit is not authorised, that it cannot be used. A host can then present" \
               " a UI to notify the user the audio unit is not able to be used in its current state."
    elif num == kAudioComponentErr_InstanceInvalidated:
        return "The component instance's implementation is not available, most likely because the process" \
               " that published it is no longer running"
    else:
        return "error number {}".format(num)
kAudioUnitErr_InvalidProperty = -10879
kAudioUnitErr_InvalidParameter = -10878
kAudioUnitErr_InvalidElement = -10877
kAudioUnitErr_NoConnection = -10876
kAudioUnitErr_FailedInitialization = -10875
kAudioUnitErr_TooManyFramesToProcess = -10874
kAudioUnitErr_InvalidFile = -10871
kAudioUnitErr_UnknownFileType = -10870
kAudioUnitErr_FileNotSpecified = -10869
kAudioUnitErr_FormatNotSupported = -10868
kAudioUnitErr_Uninitialized = -10867
kAudioUnitErr_InvalidScope = -10866
kAudioUnitErr_PropertyNotWritable = -10865
kAudioUnitErr_CannotDoInCurrentContext = -10863
kAudioUnitErr_InvalidPropertyValue = -10851
kAudioUnitErr_PropertyNotInUse = -10850
kAudioUnitErr_Initialized = -10849
kAudioUnitErr_InvalidOfflineRender = -10848
kAudioUnitErr_Unauthorized = -10847
kAudioComponentErr_InstanceInvalidated = -66749
kAudioUnitErr_RenderTimeout = -66745
kAudioOutputUnitProperty_CurrentDevice = 2000
kAudioOutputUnitProperty_EnableIO = 2003 # scope output, element 0 == output,
kAudioOutputUnitProperty_HasIO = 2006 # scope input, element 1 == input
kAudioOutputUnitProperty_IsRunning = 2001
kAudioOutputUnitProperty_ChannelMap = 2002
kAudioFormatLinearPCM = int.from_bytes(b'lpcm', byteorder='big')
kAudioFormatFlagIsFloat = 0x1
kAudioFormatFlagIsNonInterleaved = (1 << 5)
kAudioFormatFlagsNativeEndian = 0
kAudioFormatFlagIsPacked = (1 << 3)
kAudioFormatFlagsNativeFloatPacked = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked
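# For reference: with the definitions above, kAudioFormatFlagsNativeFloatPacked
# evaluates to 0x9 (float | native-endian | packed); OR-ing in
# kAudioFormatFlagIsNonInterleaved (0x20), as the stream format below does,
# gives mFormatFlags = 0x29 for 32-bit float, non-interleaved linear PCM.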
kAudioUnitProperty_StreamFormat = 8
kAudioUnitProperty_CPULoad = 6
kAudioUnitProperty_Latency = 12
kAudioUnitProperty_SupportedNumChannels = 13
kAudioUnitProperty_MaximumFramesPerSlice = 14
kAudioUnitProperty_SetRenderCallback = 23
kAudioOutputUnitProperty_SetInputCallback = 2005
kAudioUnitProperty_StreamFormat = 8
kAudioUnitProperty_SampleRate = 2
kAudioUnitProperty_ContextName = 25
kAudioUnitProperty_ElementName = 30
kAudioUnitProperty_NickName = 54
kAudioUnitScope_Global = 0 # The context for audio unit characteristics that apply to the audio unit as a whole
kAudioUnitScope_Input = 1 # The context for audio data coming into an audio unit
kAudioUnitScope_Output = 2 # The context for audio data leaving an audio unit
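# Rough mental model of the scopes (a reading of the Apple docs, not part of the
# original gist): properties of the data flowing *into* a unit are set on the
# input scope, properties of the data it produces on the output scope, and
# unit-wide settings on the global scope. For a RemoteIO output unit the render
# callback and stream format are therefore set on kAudioUnitScope_Input of
# bus 0, which is what setup_audiounit() below does.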
from objc_util import *
from ctypes import *
from coreaudioconstants import *
import numpy as np
# In the render method of AudioRenderer, replace the hard-coded 110 with f if you
# want the touch position to control the frequency.
''' Adapted from https://www.cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html
'''
AudioUnitRenderActionFlags=c_uint32
OSStatus=c_int32
OSType=c_uint32
class SMPTETimeType(c_uint32):
    kSMPTETimeType24 = 0
    kSMPTETimeType25 = 1
    kSMPTETimeType30Drop = 2
    kSMPTETimeType30 = 3
    kSMPTETimeType2997 = 4
    kSMPTETimeType2997Drop = 5
    kSMPTETimeType60 = 6
    kSMPTETimeType5994 = 7
    kSMPTETimeType60Drop = 8
    kSMPTETimeType5994Drop = 9
    kSMPTETimeType50 = 10
    kSMPTETimeType2398 = 11
class SMPTETimeFlags(c_uint32):
    kSMPTETimeUnknown = 0
    kSMPTETimeValid = (1 << 0)
    kSMPTETimeRunning = (1 << 1)
'''
/*!
@enum SMPTE State Flags
@abstract Flags that describe the SMPTE time state.
@constant kSMPTETimeValid
The full time is valid.
@constant kSMPTETimeRunning
Time is running.
'''
'''
/*!
@struct SMPTETime
@abstract A structure for holding a SMPTE time.
@field mSubframes
The number of subframes in the full message.
@field mSubframeDivisor
The number of subframes per frame (typically 80).
@field mCounter
The total number of messages received.
@field mType
The kind of SMPTE time using the SMPTE time type constants.
@field mFlags
A set of flags that indicate the SMPTE state.
@field mHours
The number of hours in the full message.
@field mMinutes
The number of minutes in the full message.
@field mSeconds
The number of seconds in the full message.
@field mFrames
The number of frames in the full message.
'''
class SMPTETime(Structure):
    _fields_ = [('mSubframes', c_int16),
                ('mSubframeDivisor', c_int16),
                ('mCounter', c_uint32),
                ('mType', SMPTETimeType),
                ('mFlags', SMPTETimeFlags),
                ('mHours', c_int16),
                ('mMinutes', c_int16),
                ('mSeconds', c_int16),
                ('mFrames', c_int16)]
'''/*!
@enum AudioTimeStamp Flags
@abstract The flags that indicate which fields in an AudioTimeStamp structure are valid.
@constant kAudioTimeStampSampleTimeValid
The sample frame time is valid.
@constant kAudioTimeStampHostTimeValid
The host time is valid.
@constant kAudioTimeStampRateScalarValid
The rate scalar is valid.
@constant kAudioTimeStampWordClockTimeValid
The word clock time is valid.
@constant kAudioTimeStampSMPTETimeValid
The SMPTE time is valid.
@constant kAudioTimeStampSampleHostTimeValid
The sample frame time and the host time are valid.
'''
class AudioTimeStampFlags(c_uint32):
    kAudioTimeStampNothingValid = 0
    kAudioTimeStampSampleTimeValid = (1 << 0)
    kAudioTimeStampHostTimeValid = (1 << 1)
    kAudioTimeStampRateScalarValid = (1 << 2)
    kAudioTimeStampWordClockTimeValid = (1 << 3)
    kAudioTimeStampSMPTETimeValid = (1 << 4)
    kAudioTimeStampSampleHostTimeValid = (kAudioTimeStampSampleTimeValid | kAudioTimeStampHostTimeValid)
'''
/*!
@struct AudioTimeStamp
@abstract A structure that holds different representations of the same point in time.
@field mSampleTime
The absolute sample frame time.
@field mHostTime
The host machine's time base, mach_absolute_time.
@field mRateScalar
The ratio of actual host ticks per sample frame to the nominal host ticks
per sample frame.
@field mWordClockTime
The word clock time.
@field mSMPTETime
The SMPTE time.
@field mFlags
A set of flags indicating which representations of the time are valid.
@field mReserved
Pads the structure out to force an even 8 byte alignment.
'''
class AudioTimeStamp(Structure):
    _fields_ = [('mSampleTime', c_double),
                ('mHostTime', c_int64),
                ('mRateScalar', c_double),
                ('mWordClockTime', c_uint64),
                ('mSMPTETime', SMPTETime),
                ('mFlags', AudioTimeStampFlags),
                ('mReserved', c_uint32)]
'''
/*!
@struct AudioBuffer
@abstract A structure to hold a buffer of audio data.
@field mNumberChannels
The number of interleaved channels in the buffer.
@field mDataByteSize
The number of bytes in the buffer pointed at by mData.
@field mData
A pointer to the buffer of audio data.
'''
class AudioBuffer(Structure):
    _fields_ = [('mNumberChannels', c_uint32),
                ('mDataByteSize', c_uint32),
                ('mData', c_void_p)]
class AudioBufferList(Structure):
    '''/*!
        @struct     AudioBufferList
        @abstract   A variable length array of AudioBuffer structures.
        @field      mNumberBuffers
                        The number of AudioBuffers in the mBuffers array.
        @field      mBuffers
                        A variable length array of AudioBuffers.'''
    _fields_ = [('mNumberBuffers', c_uint32),
                ('mBuffers', AudioBuffer * 1)]
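# Like the C header, mBuffers is declared with one element even though the
# struct is conceptually a variable-length array; for the mono, non-interleaved
# format used in this script only mBuffers[0] is ever filled, so AudioBuffer*1
# is sufficient.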
class AudioComponentDescription(Structure):
    _fields_ = [
        ('componentType', OSType),
        ('componentSubType', OSType),
        ('componentManufacturer', OSType),
        ('componentFlags', c_uint32),
        ('componentFlagsMask', c_uint32)]
AudioComponentFindNext=c.AudioComponentFindNext
AudioComponentFindNext.argtypes=[c_void_p, POINTER(AudioComponentDescription)]
AudioComponentFindNext.restype=c_void_p
AudioComponentInstanceNew=c.AudioComponentInstanceNew
AudioComponentInstanceNew.argtypes=[c_void_p, c_void_p]
AudioComponentInstanceNew.restype=OSStatus
AudioUnitSetProperty=c.AudioUnitSetProperty
AudioUnitSetProperty.argtypes=[c_void_p, c_uint32, c_uint32, c_uint32, c_void_p, c_uint32]
AudioUnitSetProperty.restype=OSStatus
AudioUnitInitialize=c.AudioUnitInitialize
AudioUnitInitialize.argtypes=[c_void_p]
AudioUnitInitialize.restype=OSStatus
c.AudioOutputUnitStop.argtypes=[c_void_p]
c.AudioUnitUninitialize.argtypes=[c_void_p]
c.AudioComponentInstanceDispose.argtypes=[c_void_p]
AudioOutputUnitStart=c.AudioOutputUnitStart
AudioOutputUnitStart.argtypes=[c_void_p]
AudioOutputUnitStart.restype=OSStatus
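# The bindings above resolve the AudioToolbox symbols through objc_util's C
# library handle `c`; declaring argtypes/restype keeps ctypes from truncating
# the 64-bit pointers and status codes these calls pass around.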
def render_callback_prototype(inRefCon: c_void_p,
                              ioActionFlags: POINTER(AudioUnitRenderActionFlags),
                              inTimeStamp: POINTER(AudioTimeStamp),
                              inBusNumber: c_uint32,
                              inNumberFrames: c_uint32,
                              ioData: POINTER(AudioBufferList)) -> c_uint32:
    pass
AURenderCallbackargs=list(render_callback_prototype.__annotations__.values())
AURenderCallback=CFUNCTYPE(AURenderCallbackargs[-1],*AURenderCallbackargs[0:-1])
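# AURenderCallback is built from the annotations of render_callback_prototype:
# the last annotation (c_uint32, the OSStatus result) becomes the CFUNCTYPE
# return type and the rest become the argument types, so the prototype exists
# only to keep the ctypes signature readable.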
class AURenderCallbackStruct(Structure):
    _fields_ = [('inputProc', AURenderCallback), ('inputProcRefCon', c_void_p)]
class AudioStreamBasicDescription(Structure):
    _fields_ = [
        ("mSampleRate", c_double),
        ("mFormatID", c_uint),
        ("mFormatFlags", c_uint),
        ("mBytesPerPacket", c_uint),
        ("mFramesPerPacket", c_uint),
        ("mBytesPerFrame", c_uint),
        ("mChannelsPerFrame", c_uint),
        ("mBitsPerChannel", c_uint),
        ("mReserved", c_uint),
    ]
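# Sanity check for the format used below (32-bit float, mono, non-interleaved):
# mBitsPerChannel = 32, so mBytesPerFrame = mChannelsPerFrame * 32/8 = 4 and,
# with mFramesPerPacket = 1, mBytesPerPacket = 4 - matching the literals that
# setup_audiounit() writes into this struct.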
import math
class AudioRenderer(object):
    def __init__(self, sampleRate=88200, freqHz=110, amplitude=0.1):
        '''Create an audio unit, and bind it to this instance'''
        self.theta = {}
        self.sounds = {}
        filtersNumber = 16
        self.filtersBuffers = [0.0] * filtersNumber
        # cutoffParam=1 will mean no filtering
        self.cutoffParam = 1
        self.sampleRate = sampleRate
        self.freqHz = freqHz
        self.toneUnit = None
        #self.setup_audiounit()
    def render_callback(self,
                        inRefCon: c_void_p,
                        ioActionFlags: POINTER(AudioUnitRenderActionFlags),
                        inTimeStamp: POINTER(AudioTimeStamp),
                        inBusNumber: c_uint32,
                        inNumberFrames: c_uint32,
                        ioData: POINTER(AudioBufferList)) -> c_uint32:
        '''call render(buffer, numFrames, timestamp)'''
        try:
            buffer = cast(ioData[0].mBuffers[0].mData, POINTER(c_float))
            sampleTime = inTimeStamp[0].mSampleTime
            return self.render(buffer, inNumberFrames, sampleTime)
        except:
            return -999
    def render(self, buffer, numFrames, sampleTime):
        '''override this with a method that fills buffer with numFrames'''
        #print(self.sounds, self.theta, v.touches)
        fb = self.filtersBuffers
        for frame in range(numFrames):
            b = 0.0
            cut = self.cutoffParam
            for touch in self.sounds:
                f, a = self.sounds[touch]
                t = self.theta[touch]
                dt = 110.0 / self.sampleRate
                t += dt
                t = t % 1
                saw = 2 * t - 1
                self.theta[touch] = t
                # polyBLEP correction around the sawtooth discontinuity
                if (t < dt):
                    t /= dt
                    saw -= t + t - t * t - 1.0
                elif (t > 1.0 - dt):
                    t = (t - 1.0) / dt
                    saw -= t * t + t + t + 1.0
                b += saw
                # the a control (from 0 to 1) is used to change the cutoff by setting:
                lerpfact = 0.2
                cut = (1 - lerpfact) * cut + lerpfact * a
            # set the first filter input to b = sawtooth wave
            input = b
            # run the chain of filters:
            for f in range(len(fb)):
                fb[f] = (1 - cut) * fb[f] + cut * input
                input = fb[f]
            buffer[frame] = input
        self.filtersBuffers = fb
        self.cutoffParam = cut
        return 0
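    # How the filter section in render() behaves: each element of filtersBuffers
    # is a one-pole low-pass, y[n] = (1 - cut) * y[n-1] + cut * x[n], and the 16
    # stages are chained in series, so cutoffParam = 1 passes the raw sawtooth
    # through unchanged while smaller values darken it progressively. A minimal
    # stand-alone sketch of one stage (illustrative only, not used by the class):
    #
    #     def one_pole(samples, cut, state=0.0):
    #         out = []
    #         for x in samples:
    #             state = (1 - cut) * state + cut * x
    #             out.append(state)
    #         return out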
    def setup_audiounit(self):
        defaultOutputDescription = AudioComponentDescription()
        defaultOutputDescription.componentType = kAudioUnitType_Output
        defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO
        defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple
        defaultOutputDescription.componentFlags = 0
        defaultOutputDescription.componentFlagsMask = 0
        defaultOutput = AudioComponentFindNext(None, byref(defaultOutputDescription))
        toneUnit = c_void_p(0)
        err = AudioComponentInstanceNew(defaultOutput, byref(toneUnit))
        if (err < 0):
            raise Exception(error_number_to_string(err))
        print(err)
        myinput = AURenderCallbackStruct()
        myinput.inputProc = AURenderCallback(self.render_callback)
        myinput.inputProcRefCon = UIApplication.sharedApplication().ptr
        self.myinput = myinput
        err = AudioUnitSetProperty(toneUnit,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   byref(myinput),
                                   sizeof(myinput))
        if (err < 0):
            raise Exception(error_number_to_string(err))
        # Set the format to 32 bit, single channel, floating point, linear PCM
        streamFormat = AudioStreamBasicDescription()
        streamFormat.mSampleRate = self.sampleRate
        streamFormat.mFormatID = kAudioFormatLinearPCM
        streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved
        streamFormat.mBytesPerPacket = 4
        streamFormat.mFramesPerPacket = 1
        streamFormat.mBytesPerFrame = 4
        streamFormat.mChannelsPerFrame = 1
        streamFormat.mBitsPerChannel = 4 * 8
        self.streamFormat = streamFormat
        err = AudioUnitSetProperty(toneUnit,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   byref(streamFormat),
                                   sizeof(AudioStreamBasicDescription))
        if (err < 0):
            raise Exception(error_number_to_string(err))
        err = AudioUnitInitialize(toneUnit)
        if (err < 0):
            raise Exception(error_number_to_string(err))
        self.toneUnit = toneUnit
    def start(self):
        '''// Start playback'''
        if not self.toneUnit:
            self.setup_audiounit()
        err = AudioOutputUnitStart(self.toneUnit)
        if (err < 0):
            raise Exception(error_number_to_string(err))
    def stop(self):
        '''// Tear it down in reverse'''
        toneUnit = self.toneUnit
        c.AudioOutputUnitStop(toneUnit)
        c.AudioUnitUninitialize(toneUnit)
        c.AudioComponentInstanceDispose(toneUnit)
        self.toneUnit = None
    def __del__(self):
        self.stop()
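# Minimal usage sketch outside the UI below (assumes the same Pythonista
# environment; the subclass name and its waveform are illustrative only):
#
#     class Beeper(AudioRenderer):
#         def render(self, buffer, numFrames, sampleTime):
#             for i in range(numFrames):
#                 buffer[i] = 0.0  # write float samples in [-1.0, 1.0]
#             return 0
#
#     beeper = Beeper()
#     beeper.start()   # creates and starts the RemoteIO unit
#     # ... later ...
#     beeper.stop()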
import time, ui
if __name__ == '__main__':
    r = AudioRenderer()
    class Theramin(ui.View):
        def __init__(self, *args, **kwargs):
            ui.View.__init__(self, *args, **kwargs)
            self.touches = {}
            self.multitouch_enabled = True
        def draw(self):
            for t in self.touches:
                '''draw a finger
                color goes as frequency, size as volume
                '''
                touch = self.touches[t]
                a = r.sounds[t][1]
                f = r.sounds[t][0]
                R = a * 150
                ui.set_color(((f % 9000) / 2500, (f % 9000) / 10000, (f % 14000) / 14000))
                ui.Path.oval(touch.location.x - R, touch.location.y - R, 2 * R, 2 * R).fill()
        def touch_began(self, touch):
            self.touches[touch.touch_id] = touch
            r.sounds[touch.touch_id] = self.touch_to_sound(touch)
            r.theta[touch.touch_id] = 0
            self.set_needs_display()
        def touch_moved(self, touch):
            if touch.touch_id in self.touches:
                self.touches[touch.touch_id] = touch
                r.sounds[touch.touch_id] = self.touch_to_sound(touch)
                self.set_needs_display()
        def touch_ended(self, touch):
            if touch.touch_id in self.touches:
                del self.touches[touch.touch_id]
                del r.sounds[touch.touch_id]
                del r.theta[touch.touch_id]
                self.set_needs_display()
        def touch_to_sound(self, t):
            f = 50 + 4000 * t.location.x / self.width
            a = t.location.y / self.height
            return (f, a)
    v = Theramin(frame=[0, 0, 576, 576], bg_color='white')
    b = ui.Button(frame=[0, 0, 100, 100])
    b.title = 'Go'
    def toggle(sender):
        if r.toneUnit:
            r.stop()
            sender.title = 'GO'
        else:
            r.start()
            print(r.toneUnit)
            sender.title = 'STOP'
    b.action = toggle
    v.add_subview(b)
    v.present('sheet')
    #r.start()