//
// AUHandler.h
// AUPlayer
//
#import <Foundation/Foundation.h>
@interface AUHandler : NSObject
- (instancetype)initWithURL:(NSURL *)url;
- (NSError *)loadAudioWithURL:(NSURL *)url;
- (NSError *)play;
- (NSError *)pause;
@end
//
// AUHandler.m
// AUPlayer
//
#import "AUHandler.h"
#import <AudioToolbox/AudioToolbox.h>
/*
Goals:
- Read individual samples of an audio track and display the waveform from their data.
- Render several waveforms (with the ability to add new ones when needed).
- Animate waveform rendering while the samples are being read.
- Add waveform zoom and time-shift functionality.
Waveform rendering:
http://blog.denivip.ru/index.php/2016/04/how-to-optimize-waveform-rendering-in-ios/?lang=en
Drawing a frequency spectrum:
http://sweb.cityu.edu.hk/sm1204/2012A/page20/index.html
http://www.myuiviews.com/2016/03/04/visualizing-audio-frequency-spectrum-on-ios-via-accelerate-vdsp-fast-fourier-transform.html
*/
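// --- Sketch (not part of the original gist) ---
// A minimal example of the first goal above: reducing interleaved 16-bit
// stereo LPCM samples (the output format produced by getAudioDescription
// below) into per-bin peak values for waveform drawing. The function name
// and bin scheme are illustrative assumptions, not an existing API.
static void reduceSamplesToPeaks(const SInt16 *samples, size_t frameCount,
                                 float *peaks, size_t binCount) {
    if (binCount == 0 || frameCount == 0) {
        return;
    }
    size_t framesPerBin = frameCount / binCount;
    if (framesPerBin == 0) {
        framesPerBin = 1;
    }
    for (size_t bin = 0; bin < binCount; bin++) {
        SInt16 peak = 0;
        size_t start = bin * framesPerBin;
        size_t end = MIN(start + framesPerBin, frameCount);
        for (size_t frame = start; frame < end; frame++) {
            SInt16 s = samples[2 * frame]; // left channel of interleaved stereo
            if (s == INT16_MIN) {
                s = INT16_MAX; // negating INT16_MIN would overflow
            } else if (s < 0) {
                s = -s;
            }
            if (s > peak) {
                peak = s;
            }
        }
        peaks[bin] = (float)peak / 32767.0f; // normalize to 0.0 ... 1.0
    }
}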
@interface AUHandler()
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic) AudioFileStreamID audioFileID;
@property (nonatomic) AudioConverterRef converter;
@property (nonatomic) AudioBufferList *buffers;
@property (nonatomic) AudioStreamBasicDescription streamDescription;
@property (nonatomic) UInt32 bufferSize;
@property (nonatomic) NSMutableArray *packets;
@property (nonatomic) size_t readHead;
@property (nonatomic) bool stopped;
- (double)packetsPerSecond;
@end
static const OSStatus audioConverterCallbackErr_NoData = 'aand';
static OSStatus playerAURenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData);
static void audioFileStreamPropertyListener(void* inClientData,
AudioFileStreamID inAudioFileStream,
AudioFileStreamPropertyID inPropertyID,
UInt32* ioFlags);
static void audioFileStreamPacketsCallback(void* inClientData,
UInt32 inNumberBytes,
UInt32 inNumberPackets,
const void* inInputData,
AudioStreamPacketDescription *inPacketDescriptions);
static OSStatus playerConverterFiller(AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** outDataPacketDescription,
void* inUserData);
// Output format for the Remote IO unit: 44.1 kHz interleaved stereo,
// 16-bit signed-integer LPCM (2 channels x 2 bytes = 4 bytes per frame).
static AudioStreamBasicDescription getAudioDescription() {
    AudioStreamBasicDescription destFormat;
    bzero(&destFormat, sizeof(AudioStreamBasicDescription));
    destFormat.mSampleRate = 44100.0;
    destFormat.mFormatID = kAudioFormatLinearPCM;
    destFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
    destFormat.mFramesPerPacket = 1;
    destFormat.mBytesPerPacket = 4;
    destFormat.mBytesPerFrame = 4;
    destFormat.mChannelsPerFrame = 2;
    destFormat.mBitsPerChannel = 16;
    destFormat.mReserved = 0;
    return destFormat;
}
@implementation AUHandler
- (double)packetsPerSecond {
    if (_streamDescription.mFramesPerPacket) {
        return _streamDescription.mSampleRate / _streamDescription.mFramesPerPacket;
    }
    // Fall back to the MP3 default: 1152 frames per packet at 44.1 kHz.
    return 44100.0 / 1152.0;
}
- (instancetype)initWithURL:(NSURL *)url {
    self = [super init];
    if (self) {
        // _packets was never initialized in the original; -addObject: on a
        // nil array silently does nothing, so no packets would ever buffer.
        _packets = [NSMutableArray array];
        [self buildAudioOutput];
        [self openFileWithURL:url];
    }
    return self;
}
- (void)openFileWithURL:(NSURL *)url {
    // NOTE: AudioFileOpenURL only handles local file URLs; a remote URL such as
    // https://antheawu.github.io/testPool/test.m4a does not work here.
    AudioFileID audioFileID0;
    OSStatus result = AudioFileOpenURL((__bridge CFURLRef _Nonnull)(url), kAudioFileReadPermission, 0, &audioFileID0);
    if (result == noErr) {
        // We need the file format before opening the stream parser, so that
        // AudioFileStreamOpen receives a file-type hint.
        AudioFileTypeID audioFileTypeID = 0;
        UInt32 propertySize = sizeof(audioFileTypeID);
        result = AudioFileGetProperty(audioFileID0, kAudioFilePropertyFileFormat,
                                      &propertySize,
                                      &audioFileTypeID);
        result = AudioFileStreamOpen((__bridge void *)(self),
                                     audioFileStreamPropertyListener,
                                     audioFileStreamPacketsCallback,
                                     audioFileTypeID, &_audioFileID);
        // TODO: feed the file data to the parser incrementally; this empty
        // NSData is a placeholder (see the sketch after this method).
        NSData *data = [NSData data];
        AudioFileStreamParseBytes(_audioFileID, (UInt32)[data length], [data bytes], 0);
    }
}
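// --- Sketch (not part of the original gist) ---
// One way to implement the "feed data incrementally" TODO above: read the
// local file in fixed-size chunks and hand each chunk to the parser. The
// method name and chunk size are illustrative assumptions.
- (void)parseLocalFileAtURL:(NSURL *)url {
    NSFileHandle *handle = [NSFileHandle fileHandleForReadingAtPath:url.path];
    if (!handle) {
        return;
    }
    const NSUInteger chunkSize = 32768;
    NSData *chunk = nil;
    while ((chunk = [handle readDataOfLength:chunkSize]).length > 0) {
        // Each call can fire audioFileStreamPropertyListener and
        // audioFileStreamPacketsCallback as the parser makes progress.
        AudioFileStreamParseBytes(_audioFileID, (UInt32)chunk.length, chunk.bytes, 0);
    }
    [handle closeFile];
}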
- (void)buildAudioOutput {
_stopped = YES;
// Create the Remote IO node
AudioComponentDescription outputUnitDescription;
bzero(&outputUnitDescription, sizeof(AudioComponentDescription));
outputUnitDescription.componentType = kAudioUnitType_Output;
outputUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
outputUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
outputUnitDescription.componentFlags = 0;
outputUnitDescription.componentFlagsMask = 0;
AudioComponent outputComponent = AudioComponentFindNext(NULL, &outputUnitDescription);
OSStatus status = AudioComponentInstanceNew(outputComponent, &_audioUnit);
NSAssert(noErr == status, @"Must be no error.");
// Set the input format of the Remote IO node
AudioStreamBasicDescription audioFormat = getAudioDescription();
status = AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, 0,
&audioFormat, sizeof(audioFormat));
NSAssert(noErr == status, @"Must be no error.");
// Set up the render callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
callbackStruct.inputProc = playerAURenderCallback;
status = AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global, 0,
&callbackStruct, sizeof(callbackStruct));
NSAssert(noErr == status, @"Must be no error.");
// Create the buffer list the converter will fill
UInt32 bufferSize = 4096 * 4;
_bufferSize = bufferSize;
_buffers = (AudioBufferList *)calloc(1, sizeof(AudioBufferList)); // sizeof(UInt32) + sizeof(AudioBuffer) under-allocates: struct padding makes sizeof(AudioBufferList) larger
_buffers->mNumberBuffers = 1;
_buffers->mBuffers[0].mNumberChannels = 2;
_buffers->mBuffers[0].mDataByteSize = bufferSize;
_buffers->mBuffers[0].mData = calloc(1, bufferSize);
}
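// --- Note (not part of the original gist) ---
// The code above never configures an AVAudioSession, which iOS normally
// requires before Remote IO output is audible. A minimal sketch, assuming
// AVFoundation is linked and #import <AVFoundation/AVFoundation.h> is added:
//
//   NSError *sessionError = nil;
//   [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback
//                                          error:&sessionError];
//   [[AVAudioSession sharedInstance] setActive:YES error:&sessionError];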
- (NSError *)loadAudioWithURL:(NSURL *)url {
    // Not implemented yet; -openFileWithURL: is the current loading path.
    return nil;
}
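// --- Sketch (not part of the original gist) ---
// One way -loadAudioWithURL: could support the remote test.m4a URL: stream
// the bytes with NSURLSession and feed each chunk to the parser. The
// delegate method below is standard NSURLSessionDataDelegate API, but
// making AUHandler the session delegate is an assumption of this sketch:
//
// - (void)URLSession:(NSURLSession *)session
//           dataTask:(NSURLSessionDataTask *)dataTask
//     didReceiveData:(NSData *)data {
//     AudioFileStreamParseBytes(_audioFileID, (UInt32)data.length, data.bytes, 0);
// }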
- (NSError *)play {
    OSStatus status = AudioOutputUnitStart(_audioUnit);
    NSAssert(noErr == status, @"AudioOutputUnitStart, error: %ld", (signed long)status);
    _stopped = NO; // keep isStopped in sync; the original never cleared this flag
    return nil;
}
- (NSError *)pause {
    OSStatus status = AudioOutputUnitStop(_audioUnit);
    NSAssert(noErr == status, @"AudioOutputUnitStop, error: %ld", (signed long)status);
    _stopped = YES;
    return nil;
}
- (void)_createAudioQueueWithAudioStreamDescription:(AudioStreamBasicDescription *)audioStreamBasicDescription
{
    // Despite the name, this creates the AudioConverter that turns the
    // parsed packets into the LPCM format the Remote IO unit expects.
    memcpy(&_streamDescription, audioStreamBasicDescription, sizeof(AudioStreamBasicDescription));
    AudioStreamBasicDescription destFormat = getAudioDescription();
    OSStatus status = AudioConverterNew(&_streamDescription, &destFormat, &_converter);
    NSAssert(noErr == status, @"AudioConverterNew, error: %ld", (signed long)status);
}
- (void)_storePacketsWithNumberOfBytes:(UInt32)inNumberBytes
numberOfPackets:(UInt32)inNumberPackets
inputData:(const void *)inInputData
packetDescriptions:(AudioStreamPacketDescription *)inPacketDescriptions
{
for (UInt32 i = 0; i < inNumberPackets; ++i) {
SInt64 packetStart = inPacketDescriptions[i].mStartOffset;
UInt32 packetSize = inPacketDescriptions[i].mDataByteSize;
assert(packetSize > 0);
// Cast before pointer arithmetic: arithmetic on void * is a GNU extension.
NSData *packet = [NSData dataWithBytes:(const char *)inInputData + packetStart length:packetSize];
[_packets addObject:packet];
}
// Step 5: once enough packets are buffered (roughly three seconds of
// audio, e.g. about 115 packets for 44.1 kHz MP3), start playback.
if (_readHead == 0 && [_packets count] > (int)([self packetsPerSecond] * 3)) {
if (_stopped) {
[self play];
}
}
}
#pragma mark -
#pragma mark Properties
- (BOOL)isStopped
{
return _stopped;
}
- (OSStatus)callbackWithNumberOfFrames:(UInt32)inNumberOfFrames
ioData:(AudioBufferList *)inIoData busNumber:(UInt32)inBusNumber
{
@synchronized(self) {
if (_readHead < [_packets count]) {
@autoreleasepool {
UInt32 packetSize = inNumberOfFrames;
// Step 7: inside the Remote IO render callback, drive the converter to
// turn buffered packets into LPCM.
OSStatus status = AudioConverterFillComplexBuffer(_converter,
playerConverterFiller,
(__bridge void *)(self),
&packetSize, _buffers, NULL);
if (noErr != status && audioConverterCallbackErr_NoData != status) {
[self pause];
return -1;
}
else if (!packetSize) {
inIoData->mNumberBuffers = 0;
}
else {
// Calculate waveform data here (e.g. with a peak-reduction helper like the
// sketch near the top of this file). Modifying
// self.buffers->mBuffers[0].mData at this point can also apply effects.
inIoData->mNumberBuffers = 1;
inIoData->mBuffers[0].mNumberChannels = 2;
inIoData->mBuffers[0].mDataByteSize = self.buffers->mBuffers[0].mDataByteSize;
inIoData->mBuffers[0].mData = self.buffers->mBuffers[0].mData;
self.buffers->mBuffers[0].mDataByteSize = self.bufferSize;
}
}
}
else {
inIoData->mNumberBuffers = 0;
return -1;
}
}
return noErr;
}
- (OSStatus)_fillConverterBufferWithBufferlist:(AudioBufferList *)ioData
packetDescription:(AudioStreamPacketDescription** )outDataPacketDescription
{
static AudioStreamPacketDescription aspdesc;
if (_readHead >= [_packets count]) {
return audioConverterCallbackErr_NoData;
}
ioData->mNumberBuffers = 1;
NSData *packet = self.packets[_readHead];
void const *data = [packet bytes];
UInt32 length = (UInt32)[packet length];
ioData->mBuffers[0].mData = (void *)data;
ioData->mBuffers[0].mDataByteSize = length;
aspdesc.mDataByteSize = length;
aspdesc.mStartOffset = 0;
aspdesc.mVariableFramesInPacket = 1;
if (outDataPacketDescription) { // may be NULL when no packet descriptions are required
*outDataPacketDescription = &aspdesc;
}
_readHead++;
return 0;
}
@end
OSStatus playerAURenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData) {
// Step 6: the Remote IO render callback; forward into the handler instance.
AUHandler *self = (__bridge AUHandler *)inRefCon;
OSStatus status = [self callbackWithNumberOfFrames:inNumberFrames
ioData:ioData busNumber:inBusNumber];
if (status != noErr) {
ioData->mNumberBuffers = 0;
*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
}
return status;
}
static void audioFileStreamPropertyListener(void* inClientData,
AudioFileStreamID inAudioFileStream,
AudioFileStreamPropertyID inPropertyID,
UInt32* ioFlags) {
AUHandler *self = (__bridge AUHandler *)inClientData;
if (inPropertyID == kAudioFileStreamProperty_DataFormat) {
UInt32 dataSize = 0;
OSStatus status = 0;
AudioStreamBasicDescription audioStreamDescription;
Boolean writable = false;
status = AudioFileStreamGetPropertyInfo(inAudioFileStream,
kAudioFileStreamProperty_DataFormat, &dataSize, &writable);
status = AudioFileStreamGetProperty(inAudioFileStream,
kAudioFileStreamProperty_DataFormat, &dataSize, &audioStreamDescription);
NSLog(@"mSampleRate: %f", audioStreamDescription.mSampleRate);
NSLog(@"mFormatID: %u", audioStreamDescription.mFormatID);
NSLog(@"mFormatFlags: %u", audioStreamDescription.mFormatFlags);
NSLog(@"mBytesPerPacket: %u", audioStreamDescription.mBytesPerPacket);
NSLog(@"mFramesPerPacket: %u", audioStreamDescription.mFramesPerPacket);
NSLog(@"mBytesPerFrame: %u", audioStreamDescription.mBytesPerFrame);
NSLog(@"mChannelsPerFrame: %u", audioStreamDescription.mChannelsPerFrame);
NSLog(@"mBitsPerChannel: %u", audioStreamDescription.mBitsPerChannel);
NSLog(@"mReserved: %u", audioStreamDescription.mReserved);
// Step 3: the parser has extracted the stream's data format; create the
// converter from that format.
[self _createAudioQueueWithAudioStreamDescription:&audioStreamDescription];
}
}
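// Note (not in the original): a fuller listener would typically also watch
// kAudioFileStreamProperty_ReadyToProducePackets, which fires once the
// parser has read enough of the header to begin emitting packets.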
static void audioFileStreamPacketsCallback(void* inClientData,
UInt32 inNumberBytes,
UInt32 inNumberPackets,
const void* inInputData,
AudioStreamPacketDescription *inPacketDescriptions){
// Step 4: the parser has produced audio packets; buffer them for playback.
AUHandler *self = (__bridge AUHandler *)inClientData;
[self _storePacketsWithNumberOfBytes:inNumberBytes
numberOfPackets:inNumberPackets
inputData:inInputData
packetDescriptions:inPacketDescriptions];
}
OSStatus playerConverterFiller(AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** outDataPacketDescription,
void* inUserData) {
AUHandler *self = (__bridge AUHandler *)inUserData;
*ioNumberDataPackets = 0;
OSStatus result = [self _fillConverterBufferWithBufferlist:ioData
packetDescription:outDataPacketDescription];
if (result == noErr) {
*ioNumberDataPackets = 1;
}
return result;
}
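// --- Hypothetical usage (not part of the original gist) ---
// AUHandler *handler = [[AUHandler alloc] initWithURL:
//     [NSURL fileURLWithPath:@"/path/to/local.mp3"]];
// // Playback starts automatically once ~3 seconds of packets are buffered,
// // or can be driven manually:
// [handler play];
// [handler pause];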