Skip to content

@admsyn /main.cpp secret
Created

Embed URL

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.
Download ZIP
Additive synthesis experiment in openFrameworks. Requires ofxAudioUnit, ofxTween. Will only work on OSX due to using Audio Units, as well as GCD (i.e. dispatch_async, dispatch_queue... etc).
#include "ofMain.h"
#include "testApp.h"
#include "ofAppGlutWindow.h"
//========================================================================
int main( ){
ofAppGlutWindow window;
ofSetupOpenGL(&window, 720,720, OF_WINDOW); // <-------- setup the GL context
// this kicks off the running of my app
// can be OF_WINDOW or OF_FULLSCREEN
// pass in width and height too:
ofRunApp( new testApp());
}
#include "testApp.h"
#include <algorithm>
// Constructor: builds the effect units, the serial GCD queue that all
// fillBuffer() work runs on, and the vDSP buffers used for inverse-FFT
// synthesis.
testApp::testApp()
: compressor(kAudioUnitType_Effect, kAudioUnitSubType_DynamicsProcessor)
, distortion(kAudioUnitType_Effect, kAudioUnitSubType_Distortion)
, audio_queue(dispatch_queue_create("admsyn.audioqueue", NULL))
, pitchIndex(0)
, log2N(12)
{
// FFT frame size: 2^12 = 4096 samples
N = 1 << log2N;
// split-complex spectrum: N/2 real + N/2 imaginary bins (vDSP packed real FFT)
fftData.realp = (float *)calloc(N / 2, sizeof(float));
fftData.imagp = (float *)calloc(N / 2, sizeof(float));
// Hamming window applied to each synthesized frame in fillBuffer()
window = (float *)calloc(N, sizeof(float));
vDSP_hamm_window(window, N, 0);
fftSetup = vDSP_create_fftsetup(log2N, kFFTRadix2);
// time-domain scratch frame; shared by all synths since fillBuffer()
// only ever runs serially on audio_queue
scratchBuffer = (AudioUnitSampleType *) calloc(N, sizeof(AudioUnitSampleType));
// NOTE(review): these allocations and fftSetup are never released — tolerable
// for an app-lifetime object, but a destructor would make ownership explicit
}
//--------------------------------------------------------------
void testApp::setup(){
turingImage.loadImage("multiscale-turing.png");
const size_t synthCount = 9;
pitches.push_back(0);
pitches.push_back(400);
pitches.push_back(-200);
pitches.push_back(700);
pitches.push_back(-700);
pitches.push_back(500);
pitches.push_back(-500);
pitches.push_back(1200);
pitches.push_back(-1200);
pitches.push_back(-1900);
mixer.setInputBusCount(synthCount);
synths.resize(synthCount);
for(int i = 0; i < synthCount; i++) {
synths[i].index = ofPoint((i / (float)synthCount) * turingImage.width, 0);
synths[i].increment = 0.2 * (i + 1);
synths[i].appRef = this;
synths[i].overlapBuffer = (AudioUnitSampleType *) calloc(N, sizeof(AudioUnitSampleType));
synths[i].pitchTween.setParameters(i, pitchEasing, ofxTween::easeOut, 0, pitches[nextPitch()], 1000, 0);
fillBuffer(&synths[i]);
synths[i].pitchShifter.setRenderCallback((AURenderCallbackStruct){RenderCallback, &synths[i]});
synths[i].pitchShifter.connectTo(synths[i].tap).connectTo(mixer, i);
mixer.setInputVolume(0.8, i);
mixer.setPan(sin((i / (float)synthCount) * (M_PI * 2.)), i);
}
// with distortion
// ------
// mixer.connectTo(distortion).connectTo(compressor).connectTo(output);
// distortion.showUI();
// ------
// without
mixer.connectTo(compressor).connectTo(output);
//
output.start();
// output.setDevice("Soundflower (2ch)"); for screen-cap
ofSetVerticalSync(true);
ofEnableSmoothing();
}
// Audio-unit render callback (runs on the CoreAudio render thread).
// Copies pre-rendered samples out of the synth's circular buffer, then
// asks the app — asynchronously, off the render thread — to refill it.
// inRefCon is the SynthContext installed via setRenderCallback in setup().
OSStatus testApp::RenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
SynthContext * synth = static_cast<SynthContext *>(inRefCon);
int32_t availableBytes;
void * buffer = TPCircularBufferTail(&synth->renderBuffer, &availableBytes);
size_t availableSamples = availableBytes / sizeof(AudioUnitSampleType);
if(availableSamples >= inNumberFrames) {
const size_t bufferSize = inNumberFrames * sizeof(AudioUnitSampleType);
memcpy(ioData->mBuffers[0].mData, buffer, bufferSize);
TPCircularBufferConsume(&synth->renderBuffer, bufferSize);
} else {
// underrun: output silence rather than a partial / stale frame
memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
}
// duplicate channel 0 into any remaining buffers (mono source -> all channels)
for(int i = 1; i < ioData->mNumberBuffers; i++) {
memcpy(ioData->mBuffers[i].mData, ioData->mBuffers[0].mData, ioData->mBuffers[i].mDataByteSize);
}
// top the ring buffer back up on audio_queue, keeping DSP off the render thread
dispatch_async(synth->appRef->audio_queue, ^{synth->appRef->fillBuffer(synth);});
return noErr;
}
// Additive synthesis via inverse FFT. Reads a horizontal strip of the turing
// image at the synth's current row, uses pixel brightness as bin amplitudes,
// inverse-FFTs to the time domain, windows, overlap-adds, and pushes finished
// chunks into the synth's ring buffer until it is full.
// Runs only on audio_queue (a serial queue), so sharing fftData/scratchBuffer
// across synths is safe.
void testApp::fillBuffer(SynthContext * synth)
{
int32_t availableBytes;
TPCircularBufferHead(&synth->renderBuffer, &availableBytes);
size_t availableSamples = availableBytes / sizeof(AudioUnitSampleType);
// 16x overlap: each loop pass emits N/16 samples
const size_t overlapAmount = 16;
const size_t overlapSampleCount = (N / overlapAmount);
ofPixelsRef turingPixels = turingImage.getPixelsRef();
while(availableSamples >= overlapSampleCount) {
// start from an empty spectrum each frame
memset(fftData.realp, 0, (N / 2) * sizeof(float));
memset(fftData.imagp, 0, (N / 2) * sizeof(float));
// brightness of every 4th pixel across the strip drives the amplitude of
// every other (even-indexed) bin; 0.03 keeps per-partial levels small
const size_t width = 25;
for(int i = 1; i < width; i++) {
ofColor c = turingPixels.getColor(synth->index.x + (i * 4), synth->index.y);
float level = (c.getBrightness() / 255.) * 0.03;
fftData.realp[i*2] = level;
}
// scan down the image; on wrap-around, retarget the pitch tween from
// wherever it currently is to the next pitch in the table
synth->index.y += synth->increment;
if(synth->index.y >= turingPixels.getHeight()) {
synth->index.y -= turingPixels.getHeight();
float currentPitch = synth->pitchTween.update();
synth->pitchTween.setParameters(pitchEasing, ofxTween::easeInOut, currentPitch, pitches[nextPitch()], 1500, 0);
}
// inverse FFT to the time domain, unpack to interleaved, apply Hamming window
vDSP_fft_zrip(fftSetup, &fftData, 1, log2N, kFFTDirection_Inverse);
vDSP_ztoc(&fftData, 1, (COMPLEX *) scratchBuffer, 2, N / 2);
vDSP_vmul(scratchBuffer, 1, window, 1, scratchBuffer, 1, N);
// overlap-add: zero the chunk emitted last pass, rotate it to the tail,
// then accumulate the fresh windowed frame over the whole buffer
memset(synth->overlapBuffer, 0, overlapSampleCount * sizeof(AudioUnitSampleType));
rotate(synth->overlapBuffer, synth->overlapBuffer + overlapSampleCount, synth->overlapBuffer + N);
vDSP_vadd(scratchBuffer, 1, synth->overlapBuffer, 1, synth->overlapBuffer, 1, N);
// only the leading chunk is fully summed; emit just that much
TPCircularBufferProduceBytes(&synth->renderBuffer, synth->overlapBuffer, overlapSampleCount * sizeof(AudioUnitSampleType));
TPCircularBufferHead(&synth->renderBuffer, &availableBytes);
availableSamples = availableBytes / sizeof(AudioUnitSampleType);
}
}
// Advance to the next entry in the pitch table, wrapping at the end,
// and return the new index.
size_t testApp::nextPitch(){
	pitchIndex = (pitchIndex + 1) % pitches.size();
	return pitchIndex;
}
//--------------------------------------------------------------
void testApp::update(){
for(int i = 0; i < synths.size(); i++) {
synths[i].tap.getLeftWaveform(synths[i].waveform, ofGetWidth(), ofGetHeight() / (float)synths.size());
synths[i].pitchShifter.setParameter(kNewTimePitchParam_Pitch, kAudioUnitScope_Global, synths[i].pitchTween.update());
}
}
//--------------------------------------------------------------
void testApp::draw(){
ofBackground(0);
ofSetColor(255);
turingImage.draw(0, 0, ofGetHeight(), ofGetHeight());
// draw synth positions in image
__block vector<ofPoint> synthPoints;
dispatch_sync(audio_queue, ^{
for(int i = 0; i < synths.size(); i++) {
synthPoints.push_back(synths[i].index);
}
});
float ratio = ofGetHeight() / (float)turingImage.height;
ofSetColor(20, 180, 240);
ofSetLineWidth(5);
for(int i = 0; i < synthPoints.size(); i++) {
synthPoints[i] *= ratio;
ofPolyline line;
line.addVertex(synthPoints[i]);
synthPoints[i].x += ((50 * 4) * ratio);
line.addVertex(synthPoints[i]);
line.draw();
}
// draw waveforms
ofSetLineWidth(4);
ofSetColor(240, 180, 20);
ofPushMatrix();
{
for(int i = 0; i < synths.size(); i++) {
synths[i].waveform.draw();
ofTranslate(0, ofGetHeight() / (float)synths.size());
}
}
ofPopMatrix();
}
//--------------------------------------------------------------
// Shutdown: stop audio first so RenderCallback can no longer enqueue
// fillBuffer() work, then release the (now idle) dispatch queue.
void testApp::exit() {
output.stop();
dispatch_release(audio_queue);
}
// Unused openFrameworks event callbacks (required by the ofBaseApp interface).
void testApp::keyPressed(int key){}
void testApp::keyReleased(int key){}
void testApp::mouseMoved(int x, int y ){}
void testApp::mouseDragged(int x, int y, int button){}
void testApp::mousePressed(int x, int y, int button){}
void testApp::mouseReleased(int x, int y, int button){}
void testApp::windowResized(int w, int h){}
void testApp::gotMessage(ofMessage msg){}
void testApp::dragEvent(ofDragInfo dragInfo){}
#pragma once
#include "ofMain.h"
#include "ofxAudioUnit.h"
#include "ofxTween.h"
#include "TPCircularBuffer.h"
#include <dispatch/dispatch.h>
#include <Accelerate/Accelerate.h>
class testApp;
struct SynthContext {
ofPoint index;
double increment;
TPCircularBuffer renderBuffer;
AudioUnitSampleType * overlapBuffer;
ofxAudioUnitTap tap;
ofxAudioUnit pitchShifter;
ofPolyline waveform;
testApp * appRef;
size_t bufferSize;
ofxTween pitchTween;
SynthContext(size_t bufferSize = 8192)
: pitchShifter(kAudioUnitType_FormatConverter, kAudioUnitSubType_NewTimePitch)
, tap(1024)
, bufferSize(bufferSize)
{
TPCircularBufferInit(&renderBuffer, bufferSize * sizeof(AudioUnitSampleType));
}
~SynthContext()
{
TPCircularBufferCleanup(&renderBuffer);
}
SynthContext(const SynthContext &orig)
: pitchShifter(orig.pitchShifter)
, tap(orig.tap)
, bufferSize(orig.bufferSize)
{
TPCircularBufferInit(&renderBuffer, bufferSize * sizeof(AudioUnitSampleType));
}
};
// Additive synth app: each SynthContext scans the turing image and renders
// audio via inverse FFT (fillBuffer), pulled through AudioUnits, mixed,
// compressed and sent to the default output.
class testApp : public ofBaseApp {
public:
testApp();
void setup();
void update();
void draw();
void exit();
void keyPressed (int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
// renders one synth's next chunk of audio into its ring buffer;
// runs on audio_queue
void fillBuffer(SynthContext * synth);
// audio unit graph: synths -> mixer -> compressor -> output
// (distortion is constructed but only wired in if enabled in setup())
ofxAudioUnitOutput output;
ofxAudioUnit compressor;
ofxAudioUnit distortion;
ofxAudioUnitMixer mixer;
// source image whose brightness values drive the partial amplitudes
ofImage turingImage;
vector<SynthContext> synths;
// pitch offsets (cents) cycled through by nextPitch()
vector<AudioUnitParameterValue> pitches;
size_t pitchIndex;
size_t nextPitch();
ofxEasingSine pitchEasing;
// NOTE(review): fftAmplitude / fftPhase appear unused in this file — confirm
// before removing
vector<float> fftAmplitude;
vector<float> fftPhase;
// FFT frame size N = 2^log2N, plus the vDSP resources built in the ctor
unsigned int N;
unsigned int log2N;
float * window;
AudioUnitSampleType * scratchBuffer;
FFTSetup fftSetup;
COMPLEX_SPLIT fftData;
// serial queue on which all fillBuffer() work runs
dispatch_queue_t audio_queue;
// pull-model source installed on each synth's pitchShifter (inRefCon is
// the SynthContext)
static OSStatus RenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData);
};
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Something went wrong with that request. Please try again.