|
|
|
from objc_util import * |
|
from ctypes import * |
|
from coreaudioconstants import * |
|
import math
|
|
|
''' Adapted from https://www.cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html |
|
''' |
|
|
|
AudioUnitRenderActionFlags=c_uint32 |
|
OSStatus=c_int32 |
|
OSType=c_uint32 |
|
class SMPTETimeType(c_uint32): |
|
kSMPTETimeType24 = 0 |
|
kSMPTETimeType25 = 1 |
|
kSMPTETimeType30Drop = 2 |
|
kSMPTETimeType30 = 3 |
|
kSMPTETimeType2997 = 4 |
|
kSMPTETimeType2997Drop = 5 |
|
kSMPTETimeType60 = 6 |
|
kSMPTETimeType5994 = 7 |
|
kSMPTETimeType60Drop = 8 |
|
kSMPTETimeType5994Drop = 9 |
|
kSMPTETimeType50 = 10 |
|
kSMPTETimeType2398 = 11 |
|
|
|
class SMPTETimeFlags(c_uint32): |
|
kSMPTETimeUnknown = 0 |
|
kSMPTETimeValid = (1 << 0) |
|
kSMPTETimeRunning = (1 << 1) |
|
'''
SMPTE state flags (from the CoreAudio headers).

kSMPTETimeValid   -- the full time is valid.
kSMPTETimeRunning -- time is running.
'''
|
|
|
'''
SMPTETime -- a structure for holding a SMPTE time (from the CoreAudio headers).

mSubframes       -- the number of subframes in the full message.
mSubframeDivisor -- the number of subframes per frame (typically 80).
mCounter         -- the total number of messages received.
mType            -- the kind of SMPTE time, using the SMPTE time type constants.
mFlags           -- a set of flags that indicate the SMPTE state.
mHours           -- the number of hours in the full message.
mMinutes         -- the number of minutes in the full message.
mSeconds         -- the number of seconds in the full message.
mFrames          -- the number of frames in the full message.
'''
|
|
|
class SMPTETime(Structure): |
|
_fields_=[('mSubframes',c_int16), |
|
('mSubframeDivisor',c_int16), |
|
('mCounter',c_uint32), |
|
('mType',SMPTETimeType), |
|
('mFlags',SMPTETimeFlags), |
|
('mHours',c_int16), |
|
('mMinutes',c_int16), |
|
('mSeconds',c_int16), |
|
('mFrames',c_int16)] |
|
'''
AudioTimeStamp flags -- indicate which fields in an AudioTimeStamp structure
are valid (from the CoreAudio headers).

kAudioTimeStampSampleTimeValid     -- the sample frame time is valid.
kAudioTimeStampHostTimeValid       -- the host time is valid.
kAudioTimeStampRateScalarValid     -- the rate scalar is valid.
kAudioTimeStampWordClockTimeValid  -- the word clock time is valid.
kAudioTimeStampSMPTETimeValid      -- the SMPTE time is valid.
kAudioTimeStampSampleHostTimeValid -- the sample frame time and the host time are valid.
'''
|
class AudioTimeStampFlags(c_uint32): |
|
kAudioTimeStampNothingValid = (0) |
|
kAudioTimeStampSampleTimeValid = (1<< 0) |
|
kAudioTimeStampHostTimeValid = (1 << 1) |
|
kAudioTimeStampRateScalarValid = (1 << 2) |
|
kAudioTimeStampWordClockTimeValid = (1 << 3) |
|
kAudioTimeStampSMPTETimeValid = (1 << 4) |
|
kAudioTimeStampSampleHostTimeValid = (kAudioTimeStampSampleTimeValid | kAudioTimeStampHostTimeValid) |
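# Because flag fields below are declared with these c_uint32 subclasses,
# reading them from a structure yields the subclass instance rather than a
# plain int, so bit tests go through .value. An illustrative helper (not part
# of the original adaptation):
def sample_time_if_valid(ts):
    '''Return ts.mSampleTime if its valid flag is set, else None.'''
    if ts.mFlags.value & AudioTimeStampFlags.kAudioTimeStampSampleTimeValid:
        return ts.mSampleTime
    return None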
|
|
|
'''
AudioTimeStamp -- a structure that holds different representations of the
same point in time (from the CoreAudio headers).

mSampleTime    -- the absolute sample frame time.
mHostTime      -- the host machine's time base, mach_absolute_time.
mRateScalar    -- the ratio of actual host ticks per sample frame to the
                  nominal host ticks per sample frame.
mWordClockTime -- the word clock time.
mSMPTETime     -- the SMPTE time.
mFlags         -- a set of flags indicating which representations of the
                  time are valid.
mReserved      -- pads the structure out to force an even 8 byte alignment.
'''
|
class AudioTimeStamp(Structure): |
|
_fields_=[('mSampleTime',c_double), |
|
              ('mHostTime',c_uint64),
|
('mRateScalar',c_double), |
|
('mWordClockTime',c_uint64), |
|
('mSMPTETime',SMPTETime), |
|
('mFlags',AudioTimeStampFlags), |
|
('mReserved',c_uint32)] |
|
'''
AudioBuffer -- a structure to hold a buffer of audio data (from the CoreAudio
headers).

mNumberChannels -- the number of interleaved channels in the buffer.
mDataByteSize   -- the number of bytes in the buffer pointed at by mData.
mData           -- a pointer to the buffer of audio data.
'''
|
|
|
class AudioBuffer(Structure): |
|
_fields_=[('mNumberChannels',c_uint32), |
|
('mDataByteSize',c_uint32), |
|
('mData',c_void_p)] |
|
|
|
class AudioBufferList(Structure): |
|
    '''
    AudioBufferList -- a variable length array of AudioBuffer structures
    (from the CoreAudio headers).

    mNumberBuffers -- the number of AudioBuffers in the mBuffers array.
    mBuffers       -- a variable length array of AudioBuffers.
    '''
|
_fields_=[('mNumberBuffers',c_uint32), |
|
('mBuffers',AudioBuffer*1)] |
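# CoreAudio uses the C variable-length-array idiom here: mBuffers is declared
# with length 1, but more AudioBuffers may follow it in memory. A small helper
# sketch (not part of the original adaptation) to reach buffers past index 0:
def buffer_at(abl, index):
    '''Return the index-th AudioBuffer of an AudioBufferList instance.'''
    buffers = cast(abl.mBuffers, POINTER(AudioBuffer))
    return buffers[index]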
|
|
|
|
|
|
|
|
|
class AudioComponentDescription(Structure): |
|
_fields_=[ |
|
('componentType',OSType), |
|
('componentSubType',OSType), |
|
('componentManufacturer',OSType), |
|
('componentFlags',c_uint32), |
|
('componentFlagsMask',c_uint32)] |
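# objc_util's `c` is a ctypes handle to the current process, so the CoreAudio
# functions below can be bound directly, declaring argtypes/restype for each.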
|
|
|
AudioComponentFindNext=c.AudioComponentFindNext |
|
AudioComponentFindNext.argtypes=[c_void_p, POINTER(AudioComponentDescription)] |
|
AudioComponentFindNext.restype=c_void_p |
|
|
|
AudioComponentInstanceNew=c.AudioComponentInstanceNew |
|
AudioComponentInstanceNew.argtypes=[c_void_p, c_void_p] |
|
AudioComponentInstanceNew.restype=OSStatus |
|
|
|
AudioUnitSetProperty=c.AudioUnitSetProperty |
|
AudioUnitSetProperty.argtypes=[c_void_p, c_uint32, c_uint32, c_uint32, c_void_p, c_uint32] |
|
AudioUnitSetProperty.restype=OSStatus |
|
|
|
AudioUnitInitialize=c.AudioUnitInitialize |
|
AudioUnitInitialize.argtypes=[c_void_p] |
|
AudioUnitInitialize.restype=OSStatus |
|
|
|
c.AudioOutputUnitStop.argtypes=[c_void_p] |
|
c.AudioUnitUninitialize.argtypes=[c_void_p] |
|
c.AudioComponentInstanceDispose.argtypes=[c_void_p] |
|
|
|
AudioOutputUnitStart=c.AudioOutputUnitStart |
|
AudioOutputUnitStart.argtypes=[c_void_p] |
|
AudioOutputUnitStart.restype=OSStatus |
|
|
|
|
|
def render_callback_prototype(inRefCon: c_void_p, |
|
ioActionFlags:POINTER(AudioUnitRenderActionFlags), |
|
inTimeStamp: POINTER(AudioTimeStamp) , |
|
inBusNumber: c_uint32, |
|
inNumberFrames: c_uint32, |
|
ioData:POINTER(AudioBufferList))->c_uint32: |
|
pass |
|
AURenderCallbackargs=list(render_callback_prototype.__annotations__.values()) |
|
AURenderCallback=CFUNCTYPE(AURenderCallbackargs[-1],*AURenderCallbackargs[0:-1]) |
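# The prototype above exists only so its annotations can be harvested; the
# CFUNCTYPE call builds the same callback type as spelling out the C signature
# by hand:
#   AURenderCallback = CFUNCTYPE(c_uint32, c_void_p,
#                                POINTER(AudioUnitRenderActionFlags),
#                                POINTER(AudioTimeStamp), c_uint32, c_uint32,
#                                POINTER(AudioBufferList))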
|
|
|
class AURenderCallbackStruct(Structure): |
|
_fields_=[('inputProc',AURenderCallback), ('inputProcRefCon',c_void_p)] |
|
|
|
|
|
class AudioStreamBasicDescription(Structure): |
|
_fields_ = [ |
|
("mSampleRate", c_double), |
|
("mFormatID", c_uint), |
|
("mFormatFlags", c_uint), |
|
("mBytesPerPacket", c_uint), |
|
("mFramesPerPacket", c_uint), |
|
("mBytesPerFrame", c_uint), |
|
("mChannelsPerFrame", c_uint), |
|
("mBitsPerChannel", c_uint), |
|
("mReserved", c_uint), |
|
] |
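# For linear PCM the ASBD fields are not independent: mBytesPerFrame equals
# mChannelsPerFrame * (mBitsPerChannel // 8), and mBytesPerPacket equals
# mFramesPerPacket * mBytesPerFrame. A sketch of a helper (hypothetical, not
# in the original) that fills the mono float32 format used by setup_audiounit:
def make_float32_mono_asbd(sample_rate):
    asbd = AudioStreamBasicDescription()
    asbd.mSampleRate = sample_rate
    asbd.mFormatID = kAudioFormatLinearPCM
    asbd.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved
    asbd.mChannelsPerFrame = 1
    asbd.mBitsPerChannel = 32
    asbd.mFramesPerPacket = 1
    asbd.mBytesPerFrame = asbd.mChannelsPerFrame * asbd.mBitsPerChannel // 8
    asbd.mBytesPerPacket = asbd.mFramesPerPacket * asbd.mBytesPerFrame
    return asbd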
|
|
|
|
class AudioRenderer(object): |
|
    def __init__(self, sampleRate=44100, freqHz=110, amplitude=0.1):
        '''Store the synth state; the audio unit itself is built lazily by start().'''
|
self.theta={} |
|
self.sounds={} |
|
filtersNumber=4 |
|
self.filtersBuffers= [0.0]*filtersNumber |
|
# cutoffParam=1 will mean no filtering |
|
self.cutoffParam=1 |
|
self.sampleRate=sampleRate |
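        # one second of delay line at the current sample rate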
|
self.delayBuffer= [0.0]*self.sampleRate |
|
self.delayBufferIndex=0 |
|
self.vibratoTime=0.0 |
|
self.freqHz=freqHz |
|
self.toneUnit=None |
|
#self.setup_audiounit() |
|
def render_callback(self, |
|
inRefCon: c_void_p, |
|
ioActionFlags:POINTER(AudioUnitRenderActionFlags), |
|
inTimeStamp: POINTER(AudioTimeStamp) , |
|
inBusNumber: c_uint32, |
|
inNumberFrames: c_uint32, |
|
ioData:POINTER(AudioBufferList))->c_uint32: |
|
        '''unpack the C pointers and call render(buffer, numFrames, sampleTime)'''
|
try: |
|
buffer = cast(ioData[0].mBuffers[0].mData, POINTER(c_float)) |
|
sampleTime=inTimeStamp[0].mSampleTime |
|
return self.render(buffer, inNumberFrames, sampleTime) |
|
        except Exception:
            # never let an exception escape into the CoreAudio C callback;
            # report a nonzero OSStatus instead
            return -999
|
    def render(self, buffer, numFrames, sampleTime):
        '''Fill buffer with numFrames float samples; override for a different synth.'''
|
fb=self.filtersBuffers |
|
for frame in range(numFrames): |
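            # slow global vibrato: a 0.4 Hz sine detunes every voice by up to 0.5%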
|
vibdetune=1+0.005*math.sin(2*math.pi*self.vibratoTime ) |
|
self.vibratoTime+=0.4/self.sampleRate |
|
self.vibratoTime=self.vibratoTime%1 |
|
b=0.0 |
|
cut=self.cutoffParam |
|
notes=[1,1.5,2,3,4] |
|
detune=[1.003,0.997,1.005,0.995] |
|
i=0 |
|
for touch in self.sounds: |
|
f,a=self.sounds[touch] |
|
for voice in range(len(self.theta[touch])): |
|
t=self.theta[touch][voice] |
|
                    # quantized harmonics on a 220 Hz root; wrap i so more than
                    # five touches cannot raise IndexError
                    dt=220.0*notes[i%len(notes)]*detune[voice]*vibdetune/self.sampleRate
|
t+= dt |
|
t=t%1 |
|
saw=2*t-1 |
|
self.theta[touch][voice]=t |
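                    # polyBLEP correction: smooth the sawtooth's discontinuity
                    # over the two samples around the phase wrap to reduce aliasing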
|
if (t < dt): |
|
t /= dt |
|
saw-= t + t - t * t - 1.0 |
|
elif (t > 1.0 - dt): |
|
t = (t - 1.0) / dt |
|
saw-= t * t + t + t + 1.0 |
|
b+=saw*0.003 |
|
                # the a control (0 to 1) of the first touch sets the filter
                # cutoff; the lerp smooths changes to avoid zipper noise
                if i==0:
                    lerpfact=0.5
                    cut=(1-lerpfact)*cut+lerpfact*a
|
i+=1 |
|
            # feed the summed sawtooth b through a chain of one-pole low-pass
            # filters, each stage's output driving the next
            sig = b
            for stage in range(len(fb)):
                fb[stage] = (1-cut)*fb[stage] + cut*sig
                sig = fb[stage]

            # one-second delay: mix in the sample stored one second ago...
            sig += 0.3*self.delayBuffer[self.delayBufferIndex]
            buffer[frame] = sig
            # ...store our output in the same slot, then advance the index
            self.delayBuffer[self.delayBufferIndex] = sig
            self.delayBufferIndex = (self.delayBufferIndex+1)%len(self.delayBuffer)
|
self.filtersBuffers=fb |
|
self.cutoffParam=cut |
|
return 0 |
|
def setup_audiounit(self): |
|
        defaultOutputDescription=AudioComponentDescription()
        defaultOutputDescription.componentType = kAudioUnitType_Output
        defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO
        defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple
        defaultOutputDescription.componentFlags = 0
        defaultOutputDescription.componentFlagsMask = 0
|
|
|
defaultOutput=AudioComponentFindNext(None, byref(defaultOutputDescription)) |
|
|
|
toneUnit=c_void_p(0) |
|
        err = AudioComponentInstanceNew(defaultOutput, byref(toneUnit))
        if err:
            raise Exception(error_number_to_string(err))
|
|
|
myinput=AURenderCallbackStruct() |
|
myinput.inputProc=AURenderCallback(self.render_callback) |
|
myinput.inputProcRefCon = UIApplication.sharedApplication().ptr |
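        # the refCon goes unused here (the bound method already carries self);
        # what matters is keeping myinput referenced on self so the ctypes
        # callback object is not garbage-collected while CoreAudio holds it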
|
self.myinput=myinput |
|
err=AudioUnitSetProperty(toneUnit, |
|
kAudioUnitProperty_SetRenderCallback, |
|
kAudioUnitScope_Input, |
|
0, |
|
byref(myinput), |
|
sizeof(myinput) ) |
|
        if err:
            raise Exception(error_number_to_string(err))
|
|
|
        # Set the format to 32 bit, single channel, floating point, linear PCM
|
        streamFormat=AudioStreamBasicDescription()
        streamFormat.mSampleRate = self.sampleRate
        streamFormat.mFormatID = kAudioFormatLinearPCM
        streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved
        streamFormat.mBytesPerPacket = 4
        streamFormat.mFramesPerPacket = 1
        streamFormat.mBytesPerFrame = 4
        streamFormat.mChannelsPerFrame = 1
        streamFormat.mBitsPerChannel = 32
        self.streamFormat=streamFormat
|
err = AudioUnitSetProperty (toneUnit, |
|
kAudioUnitProperty_StreamFormat, |
|
kAudioUnitScope_Input, |
|
0, |
|
byref(streamFormat), |
|
sizeof(AudioStreamBasicDescription)) |
|
        if err:
            raise Exception(error_number_to_string(err))
|
|
|
        err = AudioUnitInitialize(toneUnit)
        if err:
            raise Exception(error_number_to_string(err))
|
self.toneUnit=toneUnit |
|
|
|
def start(self): |
|
        '''Start playback, building the audio unit on first use.'''
|
if not self.toneUnit: |
|
self.setup_audiounit() |
|
        err = AudioOutputUnitStart(self.toneUnit)
        if err:
            raise Exception(error_number_to_string(err))
|
|
|
def stop(self): |
|
        '''Tear the audio unit down in the reverse order of setup.'''
|
toneUnit=self.toneUnit |
|
        c.AudioOutputUnitStop(toneUnit)
        c.AudioUnitUninitialize(toneUnit)
        c.AudioComponentInstanceDispose(toneUnit)
|
self.toneUnit=None |
|
    def __del__(self):
|
self.stop() |
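# A quick way to exercise the DSP path off the audio thread: render one block
# into a plain ctypes float array and inspect the peak level. Sketch only; the
# fake touch id and control values are made up for illustration.
def _offline_render_demo(renderer, num_frames=256):
    buf = (c_float * num_frames)()
    renderer.sounds['demo'] = (220.0, 0.5)           # fake touch: (freq, control)
    renderer.theta['demo'] = [0.0, 0.2, 0.37, 0.59]  # four detuned voice phases
    status = renderer.render(buf, num_frames, 0.0)
    del renderer.sounds['demo'], renderer.theta['demo']
    return status, max(abs(s) for s in buf)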
|
import ui
|
if __name__=='__main__': |
|
r=AudioRenderer() |
|
class Theramin(ui.View): |
|
def __init__(self,*args,**kwargs): |
|
ui.View.__init__(self,*args,**kwargs) |
|
self.touches={} |
|
self.multitouch_enabled=True |
|
|
|
def draw(self): |
|
for t in self.touches: |
|
            # draw a finger: color tracks frequency, size tracks volume
|
touch=self.touches[t] |
|
a=r.sounds[t][1] |
|
f=r.sounds[t][0] |
|
R=a*150 |
|
ui.set_color(( (f%9000)/2500,(f%9000)/10000, (f%14000)/14000 )) |
|
ui.Path.oval(touch.location.x-R,touch.location.y-R, 2*R,2*R).fill() |
|
|
|
def touch_began(self, touch): |
|
self.touches[touch.touch_id]=touch |
|
|
|
r.sounds[touch.touch_id]=self.touch_to_sound(touch) |
|
r.theta[touch.touch_id]=[0.0,0.2,0.37,0.59] |
|
|
|
self.set_needs_display() |
|
def touch_moved(self,touch): |
|
if touch.touch_id in self.touches: |
|
self.touches[touch.touch_id]=touch |
|
r.sounds[touch.touch_id]=self.touch_to_sound(touch) |
|
self.set_needs_display() |
|
|
|
def touch_ended(self,touch): |
|
if touch.touch_id in self.touches: |
|
del self.touches[touch.touch_id] |
|
del r.sounds[touch.touch_id] |
|
del r.theta[touch.touch_id] |
|
self.set_needs_display() |
|
def touch_to_sound(self,t): |
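            # map touch x to 50-4050 Hz and y to a 0-1 control value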
|
f=50+4000*t.location.x/self.width |
|
a=t.location.y/self.height |
|
return (f,a) |
|
v=Theramin(frame=[0,0,576,576],bg_color='white') |
|
b=ui.Button(frame=[0,0,100,100]) |
|
b.title='Go' |
|
def toggle(sender): |
|
        if r.toneUnit:
            r.stop()
            sender.title='Go'
        else:
            r.start()
            sender.title='Stop'
|
b.action=toggle |
|
v.add_subview(b) |
|
v.present('sheet') |
|
#r.start() |