Gist 941937 by @moriyoshi, created April 26, 2011 07:22
#include <stdio.h>
#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>
#include <unistd.h>
#include <stdint.h>
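/*
 * Capture audio from the default input device through an AUHAL output unit
 * and print each Float32 sample to stdout for ten seconds.
 *
 * Assumed build invocation (file name is hypothetical, not from the gist):
 *   cc capture.c -framework AudioUnit -framework CoreAudio -o capture
 */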
typedef struct {
    AudioComponentInstance unit;
    AudioStreamBasicDescription desc;
    AudioBufferList *buffer;
} InputProcParams;
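/*
 * Input render callback: the AUHAL invokes this whenever a new slice of
 * captured audio is available. AudioUnitRender() pulls the frames into the
 * AudioBufferList passed in through inRefCon, and each Float32 sample is
 * printed to stdout.
 */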
OSStatus InputProc(void *inRefCon,
                   AudioUnitRenderActionFlags *ioActionFlags,
                   const AudioTimeStamp *inTimeStamp,
                   UInt32 inBusNumber,
                   UInt32 inNumberFrames,
                   AudioBufferList *ioData)
{
    InputProcParams *params = inRefCon;
    OSStatus err = noErr;
    UInt32 i;
    UInt32 offset;
    err = AudioUnitRender(params->unit, ioActionFlags, inTimeStamp,
                          inBusNumber, inNumberFrames, params->buffer);
    if (err) {
        printf("%d\n", (int)err);
        return err;
    }
    offset = 0;
    for (i = 0; i < params->buffer->mNumberBuffers; i++) {
        const AudioBuffer *chunk;
        uint8_t *p, *e;
        if (offset >= inNumberFrames)
            break;
        chunk = &params->buffer->mBuffers[i];
        p = chunk->mData;
        e = (uint8_t *)chunk->mData + chunk->mDataByteSize;
        while (p < e) {
            if (offset >= inNumberFrames)
                break;
            printf("%f\n", *(Float32 *)p);
            p += params->desc.mBitsPerChannel / 8;
            offset++;
        }
    }
    return noErr;
}
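/*
 * main() configures an AUHAL (kAudioUnitSubType_HALOutput) unit for capture:
 * enable IO on the input element, disable the output element, attach the
 * default input device, request a mono Float32 client format, install the
 * input callback, and then let it run for ten seconds.
 */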
int main(int argc, char *argv[])
{
    AudioComponentInstance audioOutputUnit;
    AudioStreamBasicDescription deviceFormat;
    AudioStreamBasicDescription desiredFormat;
    // The AudioBufferList storage and callback context are referenced by the
    // render callback while the unit runs, so they must live at function scope.
    uint8_t buffer[sizeof(AudioBufferList) + 32768];
    InputProcParams params;
    {
        AudioComponent comp;
        AudioComponentDescription desc;
        // There are several different types of Audio Units.
        // Some audio units serve as Outputs, Mixers, or DSP
        // units. See AUComponent.h for a listing.
        desc.componentType = kAudioUnitType_Output;
        // Every component has a subType, which gives a clearer picture
        // of what this component's function will be.
        desc.componentSubType = kAudioUnitSubType_HALOutput;
        // All Audio Units in AUComponent.h must use
        // kAudioUnitManufacturer_Apple as the manufacturer.
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;
        // Find a component that matches the description.
        comp = AudioComponentFindNext(NULL, &desc);
        if (!comp)
            return 1;
        // Gain access to the services provided by the component.
        AudioComponentInstanceNew(comp, &audioOutputUnit);
    }
    // When using AudioUnitSetProperty, the 4th parameter refers to an
    // AudioUnitElement. For an AudioOutputUnit the input element is '1'
    // and the output element is '0'.
    {
        {
            const UInt32 enableIO = 1;
            AudioUnitSetProperty(audioOutputUnit,
                                 kAudioOutputUnitProperty_EnableIO,
                                 kAudioUnitScope_Input,
                                 1, // input element
                                 &enableIO,
                                 sizeof(enableIO));
        }
        {
            const UInt32 enableIO = 0;
            AudioUnitSetProperty(audioOutputUnit,
                                 kAudioOutputUnitProperty_EnableIO,
                                 kAudioUnitScope_Output,
                                 0, // output element
                                 &enableIO,
                                 sizeof(enableIO));
        }
    }
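    // With input enabled on element 1 and output disabled on element 0, the
    // AUHAL acts purely as a capture unit; the next step binds it to the
    // system's default input device.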
    {
        OSStatus err = noErr;
        UInt32 size = sizeof(AudioDeviceID);
        AudioObjectPropertyAddress addr = {
            kAudioHardwarePropertyDefaultInputDevice,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster
        };
        AudioDeviceID inputDevice;
        // Look up the system's default input device ...
        err = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                         &addr,
                                         0, NULL,
                                         &size, &inputDevice);
        if (err)
            return 1;
        // ... and make it the AUHAL's current device.
        err = AudioUnitSetProperty(audioOutputUnit,
                                   kAudioOutputUnitProperty_CurrentDevice,
                                   kAudioUnitScope_Global,
                                   0,
                                   &inputDevice,
                                   sizeof(inputDevice));
        if (err)
            return 1;
    }
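    // The device has to be attached before kAudioUnitProperty_StreamFormat is
    // queried below, so that the reported input format is that of the chosen
    // device rather than the unit's default.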
    {
        // Use CAStreamBasicDescriptions instead of 'naked'
        // AudioStreamBasicDescriptions to minimize errors.
        // CAStreamBasicDescription.h can be found in the CoreAudio SDK.
        UInt32 size = sizeof(AudioStreamBasicDescription);
        // Get the input device's format.
        AudioUnitGetProperty(audioOutputUnit,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Input,
                             1,
                             &deviceFormat,
                             &size);
        printf("channels=%u\n", (unsigned)deviceFormat.mChannelsPerFrame);
        printf("sample rate=%lf\n", deviceFormat.mSampleRate);
        printf("bits per channel=%u\n", (unsigned)deviceFormat.mBitsPerChannel);
        // Keep the device's sample rate, but request mono, packed Float32.
        desiredFormat = deviceFormat;
        desiredFormat.mFormatID = kAudioFormatLinearPCM;
        desiredFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
        desiredFormat.mChannelsPerFrame = 1;
        desiredFormat.mBitsPerChannel = sizeof(Float32) * 8;
        desiredFormat.mBytesPerFrame = desiredFormat.mBitsPerChannel / 8 * desiredFormat.mChannelsPerFrame;
        desiredFormat.mFramesPerPacket = 1;
        desiredFormat.mBytesPerPacket = desiredFormat.mBytesPerFrame * desiredFormat.mFramesPerPacket;
        // Set the client format on the output scope of the input element.
        AudioUnitSetProperty(audioOutputUnit,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Output,
                             1,
                             &desiredFormat,
                             sizeof(AudioStreamBasicDescription));
        {
            // Map device input channel 0 to the single client channel.
            const SInt32 channelMap[] = { 0 };
            AudioUnitSetProperty(audioOutputUnit,
                                 kAudioOutputUnitProperty_ChannelMap,
                                 kAudioUnitScope_Output,
                                 1,
                                 channelMap, sizeof(channelMap));
        }
    }
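    // Note: the format set on the output scope of element 1 is the client
    // format that AudioUnitRender() hands to the input callback. The AUHAL
    // does not resample on the input side, which is why desiredFormat keeps
    // the device's sample rate and only changes the layout (mono, packed
    // Float32).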
    {
        AURenderCallbackStruct input;
        params.unit = audioOutputUnit;
        params.desc = desiredFormat;
        params.buffer = (AudioBufferList *)buffer;
        params.buffer->mNumberBuffers = 1;
        params.buffer->mBuffers[0].mNumberChannels = 1;
        params.buffer->mBuffers[0].mDataByteSize = sizeof(buffer) - sizeof(AudioBufferList);
        params.buffer->mBuffers[0].mData = buffer + sizeof(AudioBufferList);
        input.inputProc = InputProc;
        input.inputProcRefCon = &params;
        AudioUnitSetProperty(audioOutputUnit,
                             kAudioOutputUnitProperty_SetInputCallback,
                             kAudioUnitScope_Global,
                             0,
                             &input, sizeof(input));
    }
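    // The 32 KiB payload behind the AudioBufferList is assumed to be large
    // enough for one render cycle (e.g. 512 frames of mono Float32 is 2 KiB);
    // a more careful version would size it from the device's
    // kAudioDevicePropertyBufferFrameSize instead of hard-coding it.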
    {
        OSStatus err;
        err = AudioUnitInitialize(audioOutputUnit);
        if (err)
            return 1;
        err = AudioOutputUnitStart(audioOutputUnit);
        if (err)
            return 1;
    }
    // Let the callback print samples for ten seconds, then tear down the unit.
    sleep(10);
    AudioOutputUnitStop(audioOutputUnit);
    AudioUnitUninitialize(audioOutputUnit);
    AudioComponentInstanceDispose(audioOutputUnit);
    printf("done\n");
    return 0;
}