secret
Created

Additive synthesis experiment in openFrameworks. Requires ofxAudioUnit and ofxTween. It will only run on OS X because it uses Audio Units and Grand Central Dispatch (dispatch_async, dispatch_queue, etc.).

  • Download Gist
main.cpp
C++
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
#include "ofMain.h"
#include "testApp.h"
#include "ofAppGlutWindow.h"
 
//========================================================================
int main( ){
 
ofAppGlutWindow window;
ofSetupOpenGL(&window, 720,720, OF_WINDOW); // <-------- setup the GL context
 
// this kicks off the running of my app
// can be OF_WINDOW or OF_FULLSCREEN
// pass in width and height too:
ofRunApp( new testApp());
 
}
testApp.cpp
C++
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207
#include "testApp.h"
#include <algorithm>
 
// Construct the effect units and allocate the FFT workspace that is
// shared by every synth voice (fillBuffer runs serially on audio_queue,
// so one scratch set is sufficient).
testApp::testApp()
: compressor(kAudioUnitType_Effect, kAudioUnitSubType_DynamicsProcessor)
, distortion(kAudioUnitType_Effect, kAudioUnitSubType_Distortion)
, audio_queue(dispatch_queue_create("admsyn.audioqueue", NULL)) // serial queue for buffer refills
, pitchIndex(0)
, log2N(12)
{
	N = 1 << log2N; // FFT size: 4096 samples per synthesis frame
	// split-complex spectrum buffer for vDSP's packed real FFT (N/2 bins)
	fftData.realp = (float *)calloc(N / 2, sizeof(float));
	fftData.imagp = (float *)calloc(N / 2, sizeof(float));
	window = (float *)calloc(N, sizeof(float));
	vDSP_hamm_window(window, N, 0);
	fftSetup = vDSP_create_fftsetup(log2N, kFFTRadix2);
	scratchBuffer = (AudioUnitSampleType *) calloc(N, sizeof(AudioUnitSampleType));
	// NOTE(review): these allocations (and fftSetup) are never released;
	// they live for the whole app, so the leak only matters at process exit.
}
 
//--------------------------------------------------------------
// Build the synth graph: load the source image, fill the pitch table,
// configure one SynthContext per mixer bus, and wire
// mixer -> compressor -> output.
void testApp::setup(){
	turingImage.loadImage("multiscale-turing.png");
	const size_t synthCount = 9;
	// Pitch offsets each voice tweens between (cents, per NewTimePitch's
	// Pitch parameter). Note there are 10 entries but only 9 voices;
	// nextPitch() cycles through all 10 over time.
	pitches.push_back(0);
	pitches.push_back(400);
	pitches.push_back(-200);
	pitches.push_back(700);
	pitches.push_back(-700);
	pitches.push_back(500);
	pitches.push_back(-500);
	pitches.push_back(1200);
	pitches.push_back(-1200);
	pitches.push_back(-1900);
	mixer.setInputBusCount(synthCount);
	synths.resize(synthCount);
	for(int i = 0; i < synthCount; i++) {
		// spread the voices' read positions horizontally across the image
		synths[i].index = ofPoint((i / (float)synthCount) * turingImage.width, 0);
		// each voice scans downward at a different rate
		synths[i].increment = 0.2 * (i + 1);
		synths[i].appRef = this;
		synths[i].overlapBuffer = (AudioUnitSampleType *) calloc(N, sizeof(AudioUnitSampleType));
		synths[i].pitchTween.setParameters(i, pitchEasing, ofxTween::easeOut, 0, pitches[nextPitch()], 1000, 0);
		// pre-fill the circular buffer so the first render callback has audio
		fillBuffer(&synths[i]);
		synths[i].pitchShifter.setRenderCallback((AURenderCallbackStruct){RenderCallback, &synths[i]});
		synths[i].pitchShifter.connectTo(synths[i].tap).connectTo(mixer, i);
		mixer.setInputVolume(0.8, i);
		// spread the voices across the stereo field
		mixer.setPan(sin((i / (float)synthCount) * (M_PI * 2.)), i);
	}
	// with distortion
	// ------
	// mixer.connectTo(distortion).connectTo(compressor).connectTo(output);
	// distortion.showUI();
	// ------
	// without
	mixer.connectTo(compressor).connectTo(output);
	//
	output.start();
	// output.setDevice("Soundflower (2ch)"); for screen-cap
	ofSetVerticalSync(true);
	ofEnableSmoothing();
}
 
// Realtime audio render callback (static; inRefCon is the voice's
// SynthContext). Copies inNumberFrames samples out of the voice's
// lock-free circular buffer into channel 0, duplicates channel 0 into the
// remaining (non-interleaved) channels, then asks the app to refill.
OSStatus testApp::RenderCallback(void * inRefCon,
                                 AudioUnitRenderActionFlags * ioActionFlags,
                                 const AudioTimeStamp * inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList * ioData)
{
	SynthContext * synth = static_cast<SynthContext *>(inRefCon);
	int32_t availableBytes;
	void * buffer = TPCircularBufferTail(&synth->renderBuffer, &availableBytes);
	size_t availableSamples = availableBytes / sizeof(AudioUnitSampleType);
	if(availableSamples >= inNumberFrames) {
		const size_t bufferSize = inNumberFrames * sizeof(AudioUnitSampleType);
		memcpy(ioData->mBuffers[0].mData, buffer, bufferSize);
		TPCircularBufferConsume(&synth->renderBuffer, bufferSize);
	} else {
		// underrun: emit silence; the partial data stays buffered for later
		memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
	}
	for(int i = 1; i < ioData->mNumberBuffers; i++) {
		memcpy(ioData->mBuffers[i].mData, ioData->mBuffers[0].mData, ioData->mBuffers[i].mDataByteSize);
	}
	// Refill off the realtime thread, serialized on audio_queue.
	// NOTE(review): dispatch_async allocates/locks, which Apple discourages
	// inside a render callback — a semaphore signal would be safer here.
	dispatch_async(synth->appRef->audio_queue, ^{synth->appRef->fillBuffer(synth);});
	return noErr;
}
 
// Synthesize audio for one voice and push it into its circular buffer.
// Runs on audio_queue (never the realtime thread). Each iteration reads a
// row of pixels from the turing image, uses brightness as harmonic
// amplitudes, inverse-FFTs into a time-domain frame, and overlap-adds
// frames with a hop of N/16 samples.
void testApp::fillBuffer(SynthContext * synth)
{
	int32_t availableBytes;
	TPCircularBufferHead(&synth->renderBuffer, &availableBytes);
	size_t availableSamples = availableBytes / sizeof(AudioUnitSampleType);
	const size_t overlapAmount = 16;                        // 16x frame overlap
	const size_t overlapSampleCount = (N / overlapAmount);  // hop size in samples
	ofPixelsRef turingPixels = turingImage.getPixelsRef();
	// keep producing hop-sized chunks while the circular buffer has room
	while(availableSamples >= overlapSampleCount) {
		memset(fftData.realp, 0, (N / 2) * sizeof(float));
		memset(fftData.imagp, 0, (N / 2) * sizeof(float));
		// map 24 pixels (every 4th column from this voice's x origin) onto
		// even-numbered FFT bins; brightness sets the bin's amplitude
		const size_t width = 25;
		for(int i = 1; i < width; i++) {
			ofColor c = turingPixels.getColor(synth->index.x + (i * 4), synth->index.y);
			float level = (c.getBrightness() / 255.) * 0.03;
			fftData.realp[i*2] = level;
		}
		// advance the voice's scanline; on wrap-around, retarget its pitch tween
		synth->index.y += synth->increment;
		if(synth->index.y >= turingPixels.getHeight()) {
			synth->index.y -= turingPixels.getHeight();
			float currentPitch = synth->pitchTween.update();
			synth->pitchTween.setParameters(pitchEasing, ofxTween::easeInOut, currentPitch, pitches[nextPitch()], 1500, 0);
		}
		// inverse FFT back to the time domain, then apply the Hamming window
		// NOTE(review): vDSP's transform is unnormalized — presumably the small
		// 0.03 gain above absorbs the scale factor; verify output levels.
		vDSP_fft_zrip(fftSetup, &fftData, 1, log2N, kFFTDirection_Inverse);
		vDSP_ztoc(&fftData, 1, (COMPLEX *) scratchBuffer, 2, N / 2);
		vDSP_vmul(scratchBuffer, 1, window, 1, scratchBuffer, 1, N);
		// overlap-add: zero the hop-sized chunk that was already emitted,
		// rotate it to the tail, then mix in the fresh windowed frame
		memset(synth->overlapBuffer, 0, overlapSampleCount * sizeof(AudioUnitSampleType));
		rotate(synth->overlapBuffer, synth->overlapBuffer + overlapSampleCount, synth->overlapBuffer + N);
		vDSP_vadd(scratchBuffer, 1, synth->overlapBuffer, 1, synth->overlapBuffer, 1, N);
		// only the leading hop-sized chunk is fully accumulated; emit just that
		TPCircularBufferProduceBytes(&synth->renderBuffer, synth->overlapBuffer, overlapSampleCount * sizeof(AudioUnitSampleType));
		TPCircularBufferHead(&synth->renderBuffer, &availableBytes);
		availableSamples = availableBytes / sizeof(AudioUnitSampleType);
	}
}
 
// Advance the cycling cursor into the pitch table and return the new index,
// wrapping back to the start after the last entry.
size_t testApp::nextPitch(){
	pitchIndex = (pitchIndex + 1) % pitches.size();
	return pitchIndex;
}
 
//--------------------------------------------------------------
// Per-frame update: refresh each voice's tapped waveform polyline (scaled
// to its horizontal band of the window) and push the voice's tweened pitch
// into its NewTimePitch unit.
void testApp::update(){
	for(int i = 0; i < synths.size(); i++) {
		synths[i].tap.getLeftWaveform(synths[i].waveform, ofGetWidth(), ofGetHeight() / (float)synths.size());
		synths[i].pitchShifter.setParameter(kNewTimePitchParam_Pitch, kAudioUnitScope_Global, synths[i].pitchTween.update());
	}
}
 
//--------------------------------------------------------------
// Render the source image, a marker line at each voice's current scan
// position, and each voice's tapped output waveform.
void testApp::draw(){
	ofBackground(0);
	ofSetColor(255);
	turingImage.draw(0, 0, ofGetHeight(), ofGetHeight());
	// draw synth positions in image
	// snapshot the positions on audio_queue so we never read them while
	// fillBuffer() is mutating them on that same (serial) queue
	__block vector<ofPoint> synthPoints;
	dispatch_sync(audio_queue, ^{
		for(int i = 0; i < synths.size(); i++) {
			synthPoints.push_back(synths[i].index);
		}
	});
	// scale image-space coordinates up to screen space
	float ratio = ofGetHeight() / (float)turingImage.height;
	ofSetColor(20, 180, 240);
	ofSetLineWidth(5);
	for(int i = 0; i < synthPoints.size(); i++) {
		synthPoints[i] *= ratio;
		ofPolyline line;
		line.addVertex(synthPoints[i]);
		// NOTE(review): fillBuffer() samples 24 pixels at stride 4 (span 96),
		// but this marker is 200 image-pixels wide — confirm intended width.
		synthPoints[i].x += ((50 * 4) * ratio);
		line.addVertex(synthPoints[i]);
		line.draw();
	}
	// draw waveforms
	ofSetLineWidth(4);
	ofSetColor(240, 180, 20);
	ofPushMatrix();
	{
		// stack each voice's waveform in its own horizontal band
		for(int i = 0; i < synths.size(); i++) {
			synths[i].waveform.draw();
			ofTranslate(0, ofGetHeight() / (float)synths.size());
		}
	}
	ofPopMatrix();
}
 
//--------------------------------------------------------------
// Shut down: stop audio first so render callbacks stop enqueueing
// fillBuffer work, then release the GCD queue.
void testApp::exit() {
	output.stop();
	dispatch_release(audio_queue);
}
 
// Unused openFrameworks input/event callbacks (required by ofBaseApp).
void testApp::keyPressed(int key){}
void testApp::keyReleased(int key){}
void testApp::mouseMoved(int x, int y ){}
void testApp::mouseDragged(int x, int y, int button){}
void testApp::mousePressed(int x, int y, int button){}
void testApp::mouseReleased(int x, int y, int button){}
void testApp::windowResized(int w, int h){}
void testApp::gotMessage(ofMessage msg){}
void testApp::dragEvent(ofDragInfo dragInfo){}
testApp.h
C++
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
#pragma once
 
#include "ofMain.h"
#include "ofxAudioUnit.h"
#include "ofxTween.h"
#include "TPCircularBuffer.h"
#include <dispatch/dispatch.h>
#include <Accelerate/Accelerate.h>
 
class testApp;
 
struct SynthContext {
ofPoint index;
double increment;
TPCircularBuffer renderBuffer;
AudioUnitSampleType * overlapBuffer;
ofxAudioUnitTap tap;
ofxAudioUnit pitchShifter;
ofPolyline waveform;
testApp * appRef;
size_t bufferSize;
ofxTween pitchTween;
SynthContext(size_t bufferSize = 8192)
: pitchShifter(kAudioUnitType_FormatConverter, kAudioUnitSubType_NewTimePitch)
, tap(1024)
, bufferSize(bufferSize)
{
TPCircularBufferInit(&renderBuffer, bufferSize * sizeof(AudioUnitSampleType));
}
~SynthContext()
{
TPCircularBufferCleanup(&renderBuffer);
}
SynthContext(const SynthContext &orig)
: pitchShifter(orig.pitchShifter)
, tap(orig.tap)
, bufferSize(orig.bufferSize)
{
TPCircularBufferInit(&renderBuffer, bufferSize * sizeof(AudioUnitSampleType));
}
};
 
// Additive synthesis app: drives several SynthContext voices from pixel
// data, rendered through Audio Units on macOS.
class testApp : public ofBaseApp {

public:
	testApp();
	void setup();
	void update();
	void draw();
	void exit();
	void keyPressed (int key);
	void keyReleased(int key);
	void mouseMoved(int x, int y );
	void mouseDragged(int x, int y, int button);
	void mousePressed(int x, int y, int button);
	void mouseReleased(int x, int y, int button);
	void windowResized(int w, int h);
	void dragEvent(ofDragInfo dragInfo);
	void gotMessage(ofMessage msg);
	// Synthesizes audio for one voice into its circular buffer (audio_queue only).
	void fillBuffer(SynthContext * synth);
	// audio chain: mixer -> (optional distortion) -> compressor -> output
	ofxAudioUnitOutput output;
	ofxAudioUnit compressor;
	ofxAudioUnit distortion;
	ofxAudioUnitMixer mixer;
	ofImage turingImage;                      // pixel source for harmonic amplitudes
	vector<SynthContext> synths;              // one voice per mixer bus
	vector<AudioUnitParameterValue> pitches;  // pitch tween targets
	size_t pitchIndex;                        // cycling cursor into pitches
	size_t nextPitch();                       // advances pitchIndex and returns it
	ofxEasingSine pitchEasing;
	vector<float> fftAmplitude;               // NOTE(review): appears unused in this file
	vector<float> fftPhase;                   // NOTE(review): appears unused in this file
	unsigned int N;                           // FFT size (1 << log2N)
	unsigned int log2N;
	float * window;                           // Hamming window, length N
	AudioUnitSampleType * scratchBuffer;      // time-domain frame scratch, length N
	FFTSetup fftSetup;
	COMPLEX_SPLIT fftData;                    // split-complex spectrum (N/2 bins)
	dispatch_queue_t audio_queue;             // serial queue guarding synth state
	// Render callback installed on each voice's pitchShifter unit.
	static OSStatus RenderCallback(void * inRefCon,
	                               AudioUnitRenderActionFlags * ioActionFlags,
	                               const AudioTimeStamp * inTimeStamp,
	                               UInt32 inBusNumber,
	                               UInt32 inNumberFrames,
	                               AudioBufferList * ioData);
};

Please sign in to comment on this gist.

Something went wrong with that request. Please try again.