# audioengine.py
# Configure the shared AVAudioSession for recording and build an
# AVAudioEngine whose input node will feed the microphone tap below.
# (Pythonista / objc_util bridge; every call goes through the ObjC runtime.)
from objc_util import *

# NOTE(review): this name holds an engine *instance*, not the class —
# kept as-is because the rest of the script references it by this name.
AVAudioEngine = ObjCClass('AVAudioEngine').alloc().init()
AVAudioSession = ObjCClass('AVAudioSession')
AVAudioRecorder = ObjCClass('AVAudioRecorder')

shared_session = AVAudioSession.sharedInstance()
# Record-only category; 0x2 == AVAudioSessionCategoryOptionDuckOthers,
# i.e. lower the volume of other audio while we record.
category_set = shared_session.setCategory_withOptions_error_(
    ns('AVAudioSessionCategoryRecord'),
    0x2,  # duck others
    None)
# Measurement mode asks the system to minimize its own input processing.
shared_session.setMode_error_(ns('AVAudioSessionModeMeasurement'), None)
setActiveOptions = 0  # 0x1 would be ...NotifyOthersOnDeactivation
shared_session.setActive_withOptions_error_(True, setActiveOptions, None)

# Configure the microphone input: grab the input node and the native
# hardware format of its output bus 0, to be used for the tap.
inputNode = AVAudioEngine.inputNode()
recordingFormat = inputNode.outputFormatForBus_(0)
def handler(_cmd, obj1_ptr, obj2_ptr):
    """Tap callback invoked for each audio buffer captured from the input node.

    _cmd     -- ObjC block descriptor pointer (unused).
    obj1_ptr -- pointer to an AVAudioPCMBuffer holding the captured audio.
    obj2_ptr -- pointer to an AVAudioTime: when the buffer was captured.
    """
    if obj1_ptr:
        buffer = ObjCInstance(obj1_ptr)
        # TODO: forward the buffer to the recognizer, e.g.
        # recognitionRequest.appendAudioPCMBuffer_(buffer)


# Wrap the Python callable as an ObjC block: void (^)(AVAudioPCMBuffer *, AVAudioTime *).
handler_block = ObjCBlock(handler, restype=None,
                          argtypes=[c_void_p, c_void_p, c_void_p])
# Install the tap so handler_block receives 1024-frame buffers from input
# bus 0 in the node's native format, then start the engine.
inputNode.installTapOnBus_bufferSize_format_block_(
    0, 1024, recordingFormat, handler_block)
AVAudioEngine.prepare()

err_ptr = c_void_p()
AVAudioEngine.startAndReturnError_(byref(err_ptr))
if err_ptr:
    # BUGFIX: original did ObjCInstance(err) — `err` was undefined, so any
    # engine-start failure raised NameError instead of printing the NSError.
    err = ObjCInstance(err_ptr)
    print(err)

# Create and configure the speech recognition request.
# BUGFIX: alloc() alone returns an *uninitialized* ObjC object; init() is
# required before the instance may be used.
recognitionRequest = ObjCClass('SFSpeechAudioBufferRecognitionRequest').alloc().init()
print(dir(recognitionRequest))  # debug: inspect the bridged API surface
recognitionRequest.setShouldReportPartialResults_(True)