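//
// CAAudioRecorder.m
//
// The gist does not include the header. What follows is a minimal sketch of the
// interface and forward declarations this implementation appears to assume;
// ivar and method names are taken from the code below, everything else
// (protocol name, property attributes) is a guess.

#import <Cocoa/Cocoa.h>
#import <CoreAudio/CoreAudio.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>

@class CAAudioRecorder;

@protocol CAAudioRecorderDelegate // hypothetical protocol name
- (void)audioRecorder:(CAAudioRecorder *)recorder stoppedRecordingDueToError:(NSError *)error;
@end

@interface CAAudioRecorder : NSObject
{
    id <CAAudioRecorderDelegate> mDelegate;
    AudioDeviceID mInputDeviceID;
    AudioUnit mAudioOutputUnit;
    ExtAudioFileRef mOutputAudioFile;
    AudioStreamBasicDescription mDeviceFormat;
    AudioStreamBasicDescription mOutputFormat;
    AudioBufferList * mAudioBuffer;
}
@property (assign) id <CAAudioRecorderDelegate> delegate;

- (BOOL)recordFromDevice:(AudioDeviceID)inputDeviceID toURL:(NSURL *)fileURL error:(NSError **)outError;
- (BOOL)startRecording:(NSError **)outError;
- (BOOL)stopRecording:(NSError **)outError;
- (uint64_t)numberOfBytesRecorded;
@end

@interface CAAudioRecorder (Private)
- (void)stopDueToError:(NSError *)recordingError;
- (BOOL)configureOutputFile:(NSURL *)fileURL type:(AudioFileTypeID)fileTypeID asbd:(AudioStreamBasicDescription *)inASBD error:(NSError **)outError;
- (BOOL)configureAudioUnits:(NSError **)outError;
- (OSStatus)audioRecorderInputWithFlags:(AudioUnitRenderActionFlags *)ioActionFlags timestamp:(const AudioTimeStamp *)inTimeStamp bus:(UInt32)inBusNumber numberOfFrames:(UInt32)inNumberFrames bufferList:(AudioBufferList *)ioData;
@end

// Forward declarations for the C helpers defined at the bottom of the file.
// PrintASBD is called but never defined in the gist; a sketch is provided below.
AudioBufferList * AllocateAudioBufferList(UInt32 numChannels, UInt32 size);
void DestroyAudioBufferList(AudioBufferList * list);
OSStatus AudioRecorderInputProc(void * inRefCon, AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp * inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList * ioData);
NSError * NSErrorFromCoreAudioError(NSString * description, OSStatus errorCode);
void PrintASBD(AudioStreamBasicDescription asbd);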
@implementation CAAudioRecorder

@synthesize delegate = mDelegate;

- (BOOL)recordFromDevice:(AudioDeviceID)inputDeviceID toURL:(NSURL *)fileURL error:(NSError **)outError
{
    // Describe the compressed AAC format the file will be written in.
    // The zeroed fields are filled in by the encoder; 1024 frames per
    // packet is the fixed packet size for AAC.
    AudioStreamBasicDescription aacFormat;
    aacFormat.mSampleRate = 44100.0;
    aacFormat.mFormatID = kAudioFormatMPEG4AAC;
    aacFormat.mFormatFlags = 0;
    aacFormat.mBytesPerPacket = 0;
    aacFormat.mFramesPerPacket = 1024;
    aacFormat.mBytesPerFrame = 0;
    aacFormat.mChannelsPerFrame = 2;
    aacFormat.mBitsPerChannel = 0;
    aacFormat.mReserved = 0;
    
    mInputDeviceID = inputDeviceID;
    
    if (![self configureAudioUnits:outError]) {
        return NO;
    }
    
    if (![self configureOutputFile:fileURL type:kAudioFileM4AType asbd:&aacFormat error:outError]) {
        return NO;
    }
    
    return YES;
}
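
// A common alternative (not in the original gist) is to fill in only the fields
// you care about and ask Core Audio to complete the rest of the AAC ASBD:
//
//     AudioStreamBasicDescription aacFormat = {0};
//     aacFormat.mSampleRate       = 44100.0;
//     aacFormat.mFormatID         = kAudioFormatMPEG4AAC;
//     aacFormat.mChannelsPerFrame = 2;
//     UInt32 size = sizeof(aacFormat);
//     AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &aacFormat);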

- (BOOL)startRecording:(NSError **)outError
{
    OSStatus error = AudioOutputUnitStart(mAudioOutputUnit);
    if (error != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to start the audio output unit.", error);
        return NO;
    }
    return YES;
}

- (BOOL)stopRecording:(NSError **)outError
{
    // Stop pulling audio data
    OSStatus error = AudioOutputUnitStop(mAudioOutputUnit);
    if (error != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to stop the audio output unit.", error);
        return NO;
    }
    
    // Dispose of our audio file reference.
    // This is also what flushes any pending async data to disk.
    error = ExtAudioFileDispose(mOutputAudioFile);
    if (error != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to close the audio file.", error);
        return NO;
    }
    
    return YES;
}

#pragma mark -

- (uint64_t)numberOfBytesRecorded
{
    // Stub; not yet implemented in this gist.
    return 0;
}
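
// The gist never tears anything down. A minimal cleanup sketch under manual
// reference counting (this file predates ARC), assuming -stopRecording: has
// already disposed of the audio file:
- (void)dealloc
{
    if (mAudioOutputUnit) {
        AudioUnitUninitialize(mAudioOutputUnit);
        AudioComponentInstanceDispose(mAudioOutputUnit);
    }
    if (mAudioBuffer) {
        DestroyAudioBufferList(mAudioBuffer);
    }
    [super dealloc];
}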
@end

#pragma mark -

@implementation CAAudioRecorder (Private)

- (void)stopDueToError:(NSError *)recordingError
{
    [self stopRecording:nil];
    [self.delegate audioRecorder:self stoppedRecordingDueToError:recordingError];
}

- (BOOL)configureOutputFile:(NSURL *)fileURL type:(AudioFileTypeID)fileTypeID asbd:(AudioStreamBasicDescription *)inASBD error:(NSError **)outError
{
    OSStatus err = noErr;
    AudioConverterRef audioConverterRef = NULL;
    
    // Create the new file
    err = ExtAudioFileCreateWithURL((CFURLRef)fileURL, fileTypeID, inASBD, NULL, kAudioFileFlags_EraseFile, &mOutputAudioFile);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to create the audio file.", err);
        return NO;
    }
    
    // Tell the file what format the data we're going to give it is in (it should be PCM).
    // You must set this in order to encode or decode a non-PCM file data format.
    err = ExtAudioFileSetProperty(mOutputAudioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &mOutputFormat);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to set the audio file's output format.", err);
        return NO;
    }
    
    // If we're recording from a mono source into a stereo file, set up a simple
    // channel map that duplicates the mono channel into both output channels.
    if (mDeviceFormat.mChannelsPerFrame == 1 && mOutputFormat.mChannelsPerFrame == 2)
    {
        // Get the underlying AudioConverterRef
        UInt32 size = sizeof(AudioConverterRef);
        err = ExtAudioFileGetProperty(mOutputAudioFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverterRef);
        if (audioConverterRef) {
            // The channel map must be as large as the number of output channels;
            // each element specifies which input channel's data is routed to that
            // output channel. Here both output channels take the first input channel (0).
            SInt32 channelMap[2] = { 0, 0 };
            err = AudioConverterSetProperty(audioConverterRef, kAudioConverterChannelMap, 2 * sizeof(SInt32), channelMap);
            if (err) {
                if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to set the channel map on the audio converter.", err);
                return NO;
            }
        }
    }
    
    // Prime async writes with a NULL buffer so the first real write from the render
    // thread doesn't block (see the header comments for ExtAudioFileWriteAsync).
    err = ExtAudioFileWriteAsync(mOutputAudioFile, 0, NULL);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to initialize async writes to the audio file.", err);
        return NO;
    }
    
    return YES;
}

- (BOOL)configureAudioUnits:(NSError **)outError
{
    AURenderCallbackStruct callback;
    AudioComponentDescription description;
    AudioComponent component;
    OSStatus err = noErr;
    UInt32 param;
    UInt32 numAudioChannels;
    UInt32 numFramesInBuffer;
    
    // Open the AudioOutputUnit.
    // There are several different types of Audio Units: some serve as outputs,
    // mixers, or DSP units. See AUComponent.h for a listing.
    description.componentType = kAudioUnitType_Output;
    description.componentSubType = kAudioUnitSubType_HALOutput;
    description.componentManufacturer = kAudioUnitManufacturer_Apple;
    description.componentFlags = 0;
    description.componentFlagsMask = 0;
    
    if ((component = AudioComponentFindNext(NULL, &description))) {
        err = AudioComponentInstanceNew(component, &mAudioOutputUnit);
        if (err != noErr) {
            mAudioOutputUnit = NULL;
            if (outError) *outError = NSErrorFromCoreAudioError(@"Could not open the output audio unit component.", err);
            return NO;
        }
    } else {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Could not find the output audio unit component.", 0);
        return NO;
    }
    
    // Configure the AudioOutputUnit: enable input on the AUHAL and disable output.
    // When using AudioUnitSetProperty, the 4th parameter refers to an
    // AudioUnitElement (bus): on an AudioOutputUnit the input element is '1'
    // and the output element is '0'.
    
    // Enable input on the AUHAL
    param = 1;
    err = AudioUnitSetProperty(mAudioOutputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof(UInt32));
    if (err == noErr) {
        // Disable output on the AUHAL
        param = 0;
        err = AudioUnitSetProperty(mAudioOutputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &param, sizeof(UInt32));
        if (err != noErr) {
            if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to disable output on the output audio unit.", err);
            return NO;
        }
    } else {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to enable input on the output audio unit.", err);
        return NO;
    }
    
    // Set the current device to the input device
    // (connect the output end of the input device to the input end of the "output audio unit").
    err = AudioUnitSetProperty(mAudioOutputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &mInputDeviceID, sizeof(AudioDeviceID));
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to set the audio unit input device.", err);
        return NO;
    }
    
    // Set up the input callback; this will be called when the AUHAL has input data
    callback.inputProc = AudioRecorderInputProc;
    callback.inputProcRefCon = self;
    err = AudioUnitSetProperty(mAudioOutputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct));
    if (err) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to set up the audio unit input callback.", err);
        return NO;
    }
    
    // Get the audio format of the input device
    param = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(mAudioOutputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &mDeviceFormat, &param);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to get the input device's audio format.", err);
        return NO;
    }
    
    // Set the output PCM format to our liking: linear 32-bit floating point PCM,
    // with the sample rate of the input device and at least two channels.
    numAudioChannels = MAX(mDeviceFormat.mChannelsPerFrame, 2);
    mOutputFormat.mChannelsPerFrame = numAudioChannels;
    mOutputFormat.mSampleRate = mDeviceFormat.mSampleRate; // *********** Change this to 44100.0 and it works fine since the file is 44100 too *****
    mOutputFormat.mFormatID = kAudioFormatLinearPCM;
    mOutputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
    
    if (mOutputFormat.mFormatID == kAudioFormatLinearPCM && numAudioChannels == 1)
        mOutputFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
    
#if __BIG_ENDIAN__
    mOutputFormat.mFormatFlags |= kAudioFormatFlagIsBigEndian;
#endif
    
    mOutputFormat.mBitsPerChannel = sizeof(Float32) * 8;
    mOutputFormat.mBytesPerFrame = mOutputFormat.mBitsPerChannel / 8; // per channel, since the format is non-interleaved
    mOutputFormat.mFramesPerPacket = 1;
    mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame;
    
    PrintASBD(mDeviceFormat);
    PrintASBD(mOutputFormat);
    
    // Set the format the AudioOutputUnit will output data in
    err = AudioUnitSetProperty(mAudioOutputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &mOutputFormat, sizeof(AudioStreamBasicDescription));
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to set the output format of the audio unit.", err);
        return NO;
    }
    
    // Get the number of frames in the IO buffer(s) --- kAudioUnitErr_InvalidProperty ????
    param = sizeof(UInt32);
    err = AudioUnitGetProperty(mAudioOutputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &numFramesInBuffer, &param);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to get the buffer frame size.", err);
        return NO;
    }
    
    // Initialize the AU
    err = AudioUnitInitialize(mAudioOutputUnit);
    if (err != noErr) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to initialize the audio unit.", err);
        return NO;
    }
    
    // Allocate our audio buffers
    mAudioBuffer = AllocateAudioBufferList(mOutputFormat.mChannelsPerFrame, numFramesInBuffer * mOutputFormat.mBytesPerFrame);
    if (mAudioBuffer == NULL) {
        if (outError) *outError = NSErrorFromCoreAudioError(@"Failed to allocate the audio buffers.", 0);
        return NO;
    }
    
    return YES;
}

- (OSStatus)audioRecorderInputWithFlags:(AudioUnitRenderActionFlags *)ioActionFlags timestamp:(const AudioTimeStamp *)inTimeStamp
    bus:(UInt32)inBusNumber numberOfFrames:(UInt32)inNumberFrames bufferList:(AudioBufferList *)ioData
{
    OSStatus err = noErr;
    
    // Render the new input data into our own audio buffer
    // (ioData is NULL for AUHAL input callbacks)
    err = AudioUnitRender(mAudioOutputUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, mAudioBuffer);
    if (err) {
        [self stopDueToError:NSErrorFromCoreAudioError(@"AudioUnitRender() failed.", err)];
        return err;
    }
    
    // Write to the file; ExtAudioFile automagically handles conversion/encoding.
    // NOTE: Async writes may not be flushed to disk until the file
    // reference is disposed using ExtAudioFileDispose.
    err = ExtAudioFileWriteAsync(mOutputAudioFile, inNumberFrames, mAudioBuffer);
    if (err) {
        NSLog(@"ExtAudioFileWriteAsync Error (%d): %lld", (int)err, (long long)inNumberFrames);
        [self stopDueToError:NSErrorFromCoreAudioError(@"ExtAudioFileWriteAsync() failed.", err)];
        return err;
    }
    
    return err;
}
@end
#pragma mark -

// Convenience function to allocate our audio buffers.
// Note: AudioBufferList already contains one AudioBuffer, so this allocation
// is slightly larger than strictly necessary; that's harmless.
AudioBufferList * AllocateAudioBufferList(UInt32 numChannels, UInt32 size)
{
    AudioBufferList * list;
    UInt32 i;
    
    list = (AudioBufferList *)calloc(1, sizeof(AudioBufferList) + numChannels * sizeof(AudioBuffer));
    if (list == NULL)
        return NULL;
    
    list->mNumberBuffers = numChannels;
    for (i = 0; i < numChannels; ++i) {
        list->mBuffers[i].mNumberChannels = 1;
        list->mBuffers[i].mDataByteSize = size;
        list->mBuffers[i].mData = malloc(size);
        if (list->mBuffers[i].mData == NULL) {
            DestroyAudioBufferList(list);
            return NULL;
        }
    }
    return list;
}

// Convenience function to dispose of our audio buffers
void DestroyAudioBufferList(AudioBufferList * list)
{
    UInt32 i;
    
    if (list) {
        for (i = 0; i < list->mNumberBuffers; i++) {
            if (list->mBuffers[i].mData)
                free(list->mBuffers[i].mData);
        }
        free(list);
    }
}
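
// PrintASBD() is called by -configureAudioUnits: but is not defined anywhere in
// the gist. A plausible sketch of the missing debugging helper, which simply
// logs the interesting ASBD fields:
void PrintASBD(AudioStreamBasicDescription asbd)
{
    // The format ID is a four-character code stored in host byte order.
    UInt32 fourCC = CFSwapInt32HostToBig(asbd.mFormatID);
    char formatID[5] = {0};
    memcpy(formatID, &fourCC, 4);
    
    NSLog(@"Sample Rate:        %10.0f", asbd.mSampleRate);
    NSLog(@"Format ID:          %10s",   formatID);
    NSLog(@"Format Flags:       %10u",   (unsigned int)asbd.mFormatFlags);
    NSLog(@"Bytes per Packet:   %10u",   (unsigned int)asbd.mBytesPerPacket);
    NSLog(@"Frames per Packet:  %10u",   (unsigned int)asbd.mFramesPerPacket);
    NSLog(@"Bytes per Frame:    %10u",   (unsigned int)asbd.mBytesPerFrame);
    NSLog(@"Channels per Frame: %10u",   (unsigned int)asbd.mChannelsPerFrame);
    NSLog(@"Bits per Channel:   %10u",   (unsigned int)asbd.mBitsPerChannel);
}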

// Called on the AUHAL's realtime thread when the input device has new data.
// The ioData passed here is NULL; the Objective-C method renders into the
// recorder's own buffer list via AudioUnitRender().
OSStatus AudioRecorderInputProc(void * inRefCon, AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp * inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList * ioData)
{
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
    CAAudioRecorder * recorder = (CAAudioRecorder *)inRefCon;
    OSStatus result = [recorder audioRecorderInputWithFlags:ioActionFlags timestamp:inTimeStamp bus:inBusNumber numberOfFrames:inNumberFrames bufferList:ioData];
    [pool release];
    return result;
}

NSError * NSErrorFromCoreAudioError(NSString * description, OSStatus errorCode)
{
    // Note: no early return for noErr/kAudioHardwareNoError here. Some callers
    // above pass 0 when there is no OSStatus to report (e.g. a missing
    // component) and still expect an NSError back.
    if (!description) description = @"Core Audio error.";
    
    NSString * reason = [NSString stringWithFormat:@"An error occurred. Error code: %d", (int)errorCode];
    return [NSError errorWithDomain:@"CoreAudioErrorDomain" code:errorCode userInfo:[NSDictionary dictionaryWithObjectsAndKeys:
        description, NSLocalizedDescriptionKey, reason, NSLocalizedRecoverySuggestionErrorKey, nil]];
}