Core Audio
struct AQPlayerState {
    AudioStreamBasicDescription   mDataFormat;               // format of the audio to play
    AudioQueueRef                 mQueue;                     // the playback audio queue
    AudioQueueBufferRef           mBuffers[kNumberBuffers];   // the queue's reusable buffers
    SInt64                        mCurrentPacket;             // next packet to enqueue
    UInt32                        mNumPacketsToRead;          // packets to read per buffer fill
    AudioStreamPacketDescription *mPacketDescs;               // packet descriptions (NULL for constant bit rate)
    DAGAudioSinkNode             *audioSink;                  // the node that supplies audio data
};
+ (void)initDeviceDict
{
    // Variables used for each of the functions
    UInt32 propertySize = 0;
    Boolean writable = NO;
    AudioObjectPropertyAddress property;

    // Get the size of the device IDs array
    property.mSelector = kAudioHardwarePropertyDevices;
    property.mScope    = kAudioObjectPropertyScopeGlobal;
    property.mElement  = kAudioObjectPropertyElementMaster;
    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
                                   &property, 0, NULL, &propertySize);

    // Create the array for device IDs
    AudioDeviceID *deviceIDs = (AudioDeviceID *)malloc(propertySize);

    // Get the device IDs
    AudioObjectGetPropertyData(kAudioObjectSystemObject,
                               &property, 0, NULL,
                               &propertySize, deviceIDs);
    NSUInteger numDevices = propertySize / sizeof(AudioDeviceID);

    // This is the array to hold the NSDictionaries
    devices = [[NSMutableArray alloc] initWithCapacity:numDevices];

    // Get per-device information
    for (int i = 0; i < numDevices; i++) {
        NSMutableDictionary *deviceDict = [[NSMutableDictionary alloc] init];
        // Store the AudioDeviceID itself, not the loop index
        [deviceDict setValue:[NSNumber numberWithUnsignedInt:deviceIDs[i]]
                      forKey:audioSourceDeviceIDKey];
        CFStringRef string;

        // Get the name of the audio device
        property.mSelector = kAudioObjectPropertyName;
        property.mScope    = kAudioObjectPropertyScopeGlobal;
        property.mElement  = kAudioObjectPropertyElementMaster;
        propertySize = sizeof(string);
        AudioObjectGetPropertyData(deviceIDs[i], &property, 0, NULL,
                                   &propertySize, &string);

        // Even though it's probably OK to use the CFString as an NSString,
        // make a copy just to be safe.
        NSString *deviceName = [(NSString *)string copy];
        CFRelease(string);
        [deviceDict setValue:deviceName
                      forKey:audioSourceNameKey];
        // The copy comes back with a +1 retain count; the dictionary
        // retains it, so release our reference.
        [deviceName release];

        // Get the UID of the device, used by the audio queue
        property.mSelector = kAudioDevicePropertyDeviceUID;
        propertySize = sizeof(string);
        AudioObjectGetPropertyData(deviceIDs[i], &property, 0, NULL,
                                   &propertySize, &string);
        // Again, copy to an NSString...
        NSString *deviceUID = [(NSString *)string copy];
        CFRelease(string);
        [deviceDict setValue:deviceUID
                      forKey:audioSourceDeviceUIDKey];
        [deviceUID release];
        // Get the nominal sample rate
        Float64 currentSampleRate = 0;
        propertySize = sizeof(currentSampleRate);
        AudioDeviceGetProperty(deviceIDs[i], 0, NO,
                               kAudioDevicePropertyNominalSampleRate,
                               &propertySize, &currentSampleRate);
        [deviceDict setValue:[NSNumber numberWithDouble:currentSampleRate]
                      forKey:audioSourceNominalSampleRateKey];

        // Get an array of sample rates
        AudioValueRange *sampleRates;
        AudioDeviceGetPropertyInfo(deviceIDs[i], 0, NO,
                                   kAudioDevicePropertyAvailableNominalSampleRates,
                                   &propertySize, &writable);
        sampleRates = (AudioValueRange *)malloc(propertySize);
        AudioDeviceGetProperty(deviceIDs[i], 0, NO,
                               kAudioDevicePropertyAvailableNominalSampleRates,
                               &propertySize, sampleRates);
        NSUInteger numSampleRates = propertySize / sizeof(AudioValueRange);
        NSMutableArray *sampleRateTempArray = [[NSMutableArray alloc] init];
        for (int j = 0; j < numSampleRates; j++) {
            // An NSRange is a location and length...
            NSRange sampleRange;
            sampleRange.length   = sampleRates[j].mMaximum - sampleRates[j].mMinimum;
            sampleRange.location = sampleRates[j].mMinimum;
            [sampleRateTempArray addObject:[NSValue valueWithRange:sampleRange]];
        }

        // Create an immutable copy of the available sample rate array
        // and store it in the NSDictionary
        NSArray *tempArray = [sampleRateTempArray copy];
        [sampleRateTempArray release];
        [deviceDict setValue:tempArray
                      forKey:audioSourceAvailableSampleRatesKey];
        [tempArray release];
        free(sampleRates);
        // Get the number of output channels for the device
        // (only the first stream's channel count is inspected here)
        AudioBufferList bufferList;
        propertySize = sizeof(bufferList);
        AudioDeviceGetProperty(deviceIDs[i], 0, NO,
                               kAudioDevicePropertyStreamConfiguration,
                               &propertySize, &bufferList);
        int outChannels, inChannels;
        if (bufferList.mNumberBuffers > 0) {
            outChannels = bufferList.mBuffers[0].mNumberChannels;
            [deviceDict setValue:[NSNumber numberWithInt:outChannels]
                          forKey:audioSourceOutputChannelsKey];
        } else {
            [deviceDict setValue:[NSNumber numberWithInt:0]
                          forKey:audioSourceOutputChannelsKey];
        }

        // Again for input channels. The buffer list only describes the
        // stream layout; no audio data is attached to it.
        propertySize = sizeof(bufferList);
        AudioDeviceGetProperty(deviceIDs[i], 0, YES,
                               kAudioDevicePropertyStreamConfiguration,
                               &propertySize, &bufferList);
        if (bufferList.mNumberBuffers > 0) {
            inChannels = bufferList.mBuffers[0].mNumberChannels;
            [deviceDict setValue:[NSNumber numberWithInt:inChannels]
                          forKey:audioSourceInputChannelsKey];
        } else {
            [deviceDict setValue:[NSNumber numberWithInt:0]
                          forKey:audioSourceInputChannelsKey];
        }

        // Add this new device dict to the array and release it
        [devices addObject:deviceDict];
        [deviceDict release];
    }
    free(deviceIDs);
}
// Enqueue a buffer
- (void)fillBuffer:(AudioQueueBufferRef)aqBuffer
{
    if (aqBuffer == NULL) {
        return;
    }
    aqBuffer->mPacketDescriptionCount = 0;
    aqBuffer->mAudioDataByteSize = bufferSize;

    // Fill aqBuffer->mAudioData with your data here, then hand the
    // buffer back to the queue.
    OSStatus result = AudioQueueEnqueueBuffer(state.mQueue, aqBuffer, 0, NULL);
    if (result != noErr) {
        OSULogs(LOG_FAIL, @"Unable to enqueue audio buffer.");
    }
}
- (bool)prepare {
    swSampleRate = [[self sampleRate] floatValue];
    float secondsPerBlock = [[self blockSize] floatValue] / swSampleRate;

    NSDictionary *deviceDict = [viewController getSelectedDevice];
    NSArray *sampleRates = [deviceDict objectForKey:audioSourceAvailableSampleRatesKey];

    // Set up the desired parameters for the Audio Queue
    state.mDataFormat.mFormatID         = kAudioFormatLinearPCM;
    state.mDataFormat.mSampleRate       = hwSampleRate;
    state.mDataFormat.mChannelsPerFrame = channels;
    state.mDataFormat.mBitsPerChannel   = 8 * sizeof(Float32);
    state.mDataFormat.mBytesPerPacket   = channels * sizeof(Float32);
    state.mDataFormat.mBytesPerFrame    = channels * sizeof(Float32);
    state.mDataFormat.mFramesPerPacket  = 1;
    state.mDataFormat.mFormatFlags      = kLinearPCMFormatFlagIsFloat | kLinearPCMFormatFlagIsPacked;

    // Create a block for the callback
    AudioQueueOutputCallbackBlock callback = ^(AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
        HandleOutputBuffer(&state, inAQ, inBuffer);
    };

    // Create the new Audio Queue
    OSStatus result = AudioQueueNewOutputWithDispatchQueue(&state.mQueue, &state.mDataFormat, 0,
                                                           dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
                                                           callback);
    if (result != noErr) {
        OSULogs(LOG_FAIL, @"Unable to create new output audio queue.");
        return NO;
    }

    // Get the buffer size (done after the queue is created so a valid queue is passed in)
    bufferSize = [DAGAudioSinkNode bufferSizeWithQueue:state.mQueue
                                                  Desc:state.mDataFormat
                                               Seconds:bufferDuration];

    // Set the device for this audio queue
    CFStringRef deviceUID;
    deviceUID = (CFStringRef)[deviceDict objectForKey:audioSourceDeviceUIDKey];
    UInt32 propertySize = sizeof(CFStringRef);
    result = AudioQueueSetProperty(state.mQueue,
                                   kAudioQueueProperty_CurrentDevice,
                                   &deviceUID, propertySize);
    if (result != noErr) {
        NSLog(@"Unable to set audio queue device to %@", deviceUID);
        return NO;
    }

    // Create a set of buffers
    for (int i = 0; i < kNumberBuffers; ++i) {
        AudioQueueAllocateBuffer(state.mQueue, bufferSize, &state.mBuffers[i]);
    }
    return YES;
}
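// +bufferSizeWithQueue:Desc:Seconds: is not included in this gist. A minimal
// sketch, modeled on Apple's DeriveBufferSize example and assuming packed
// linear PCM with a constant bytes-per-frame, might look like this (the
// 0x50000/0x4000 bounds are illustrative, not from the original):
+ (UInt32)bufferSizeWithQueue:(AudioQueueRef)queue
                         Desc:(AudioStreamBasicDescription)desc
                      Seconds:(Float64)seconds
{
    static const UInt32 maxBufferSize = 0x50000;  // 320 KB upper bound
    static const UInt32 minBufferSize = 0x4000;   // 16 KB lower bound
    // For packed linear PCM every frame is the same size, so the byte count
    // is sample rate * duration * bytes per frame; the queue itself is not
    // needed in this constant-bit-rate case.
    Float64 numBytes = desc.mSampleRate * seconds * desc.mBytesPerFrame;
    UInt32 size = (UInt32)numBytes;
    if (size > maxBufferSize) size = maxBufferSize;
    if (size < minBufferSize) size = minBufferSize;
    return size;
}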
- (void)run {
    // Prime the playback by filling and enqueueing each of the buffers
    for (int i = 0; i < kNumberBuffers; i++) {
        [self fillBuffer:state.mBuffers[i]];
    }

    UInt32 numberPrepared = 0;
    UInt32 framesToPrime = (bufferSize / state.mDataFormat.mBytesPerFrame) * kNumberBuffers;
    OSStatus result = AudioQueuePrime(state.mQueue, framesToPrime, &numberPrepared);
    if (result != noErr) {
        OSULogs(LOG_FAIL, @"Unable to prime the audio queue.");
        return;
    } else {
        OSULogs(LOG_INFO, @"Primed with %u frames.", numberPrepared);
    }

    // Start audio
    result = AudioQueueStart(state.mQueue, NULL);
    if (result != noErr) {
        OSULogs(LOG_FAIL, @"Unable to start the audio queue!");
        return;
    }
}
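// HandleOutputBuffer is referenced by the callback block in -prepare but is
// not part of this gist. A minimal sketch, assuming the user-data pointer is
// the AQPlayerState declared above and that its audioSink implements the
// -fillBuffer: method shown earlier to refill and re-enqueue the buffer:
static void HandleOutputBuffer(void *inUserData,
                               AudioQueueRef inAQ,
                               AudioQueueBufferRef inBuffer)
{
    struct AQPlayerState *pState = (struct AQPlayerState *)inUserData;
    // Ask the sink node to refill the buffer the queue has just finished
    // playing; -fillBuffer: hands it back to the queue when done.
    [pState->audioSink fillBuffer:inBuffer];
}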