Created
January 3, 2013 00:51
-
-
Save yurydelendik/4439858 to your computer and use it in GitHub Desktop.
Plays tone using Audio Data API and Web Audio API
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<!doctype html>
<html>
<head>
<title></title>
<script>
// Resample sound using linear interpolation for Web Audio due to | |
// http://code.google.com/p/chromium/issues/detail?id=73062 | |
// State for a linear-interpolation resampler that converts audio from
// `sourceRate` Hz to `targetRate` Hz (conversion itself happens in
// AudioResampler.prototype.getData). Works around Chromium issue 73062,
// where the Web Audio context cannot be created at an arbitrary rate.
function AudioResampler(sourceRate, targetRate) {
  this.targetRate = targetRate;
  this.sourceRate = sourceRate;
  // Fractional source-position carried over between getData calls.
  this.sourceOffset = 0;
  // Last source sample of the previous call, one entry per channel,
  // kept so interpolation can span call boundaries.
  this.tail = [];
}
AudioResampler.prototype = {
  // Consumer-assigned callback: must fill e.data (an array with one
  // Float32Array per channel) with e.count samples at the source rate.
  ondatarequested: function (e) { },
  // Fills `channelsData` (one Float32Array per output channel) with
  // `count` samples at the target rate. Pulls the required number of
  // source-rate samples via ondatarequested and linearly interpolates
  // between neighbouring source samples.
  getData: function (channelsData, count) {
    var k = this.sourceRate / this.targetRate; // source samples per target sample
    var offset = this.sourceOffset; // fractional carry-over from last call, in (-1, 0]
    // Source samples needed to cover `count` target samples; the +1 keeps
    // Math.ceil(i) below inside the buffer.
    var needed = Math.floor(count * k + offset + 1);
    var sourceData = [];
    for (var channel = 0; channel < channelsData.length; channel++)
      sourceData.push(new Float32Array(needed));
    var e = { data: sourceData, count: needed };
    this.ondatarequested(e);
    for (var channel = 0; channel < channelsData.length; channel++) {
      var data = channelsData[channel];
      var source = sourceData[channel];
      for (var j = 0; j < count; j++) {
        var i = j * k + offset; // fractional source index for target sample j
        var i1 = Math.floor(i), i2 = Math.ceil(i);
        // i1 can be -1 when offset is negative: use the last sample
        // remembered from the previous call instead.
        var source_i1 = i1 < 0 ? this.tail[channel] : source[i1];
        if (i1 === i2) {
          // Exact hit on a source sample — no interpolation needed.
          data[j] = source_i1;
        } else {
          var alpha = i - i1;
          data[j] = source_i1 * (1 - alpha) + source[i2] * alpha;
        }
      }
      // Save the last source sample for cross-call interpolation.
      this.tail[channel] = source[needed - 1];
    }
    // Carry the fractional read position into the next call; since
    // needed >= count*k + offset, this stays in (-1, 0].
    this.sourceOffset = (count * k + offset) - needed;
  }
};
// Audio output channel backed by the Web Audio API. When the context's
// native sample rate differs from the requested `sampleRate`, an
// AudioResampler is inserted between the data source and the context.
function WebAudioChannel(sampleRate, channels) {
  // Prefer the standard constructor, fall back to the WebKit-prefixed one.
  var context = typeof AudioContext !== 'undefined'
    ? new AudioContext()
    : new webkitAudioContext();
  this.context = context;
  this.contextSampleRate = context.sampleRate || 44100;
  this.channels = channels;
  this.sampleRate = sampleRate;
  if (this.contextSampleRate != sampleRate) {
    // Rates differ: route data requests through the resampler.
    var self = this;
    this.resampler = new AudioResampler(sampleRate, this.contextSampleRate);
    this.resampler.ondatarequested = function (e) {
      self.requestData(e.data, e.count);
    };
  }
}
WebAudioChannel.prototype = {
  // Consumer-assigned callback: must fill e.data (interleaved
  // Float32Array) with e.count samples.
  ondatarequested: function () {},
  // Creates a script-processor node and begins pulling audio from
  // ondatarequested (directly, or through the resampler when the
  // context's rate differs from the source rate).
  start: function () {
    // createScriptProcessor superseded the deprecated createJavaScriptNode.
    var source = this.context.createScriptProcessor ?
      this.context.createScriptProcessor(512, 0, this.channels) :
      this.context.createJavaScriptNode(512, 0, this.channels);
    var self = this;
    source.onaudioprocess = function (e) {
      var channelsData = [];
      for (var i = 0; i < self.channels; i++)
        channelsData.push(e.outputBuffer.getChannelData(i));
      var count = channelsData[0].length;
      if (self.resampler) {
        // The resampler pulls source-rate data via its own
        // ondatarequested (wired to requestData) and converts it.
        self.resampler.getData(channelsData, count);
      } else {
        // Fix: removed a dead `var e = {...}` that shadowed the
        // audioprocess event parameter and was never used.
        self.requestData(channelsData, count);
      }
    };
    source.connect(this.context.destination);
    this.source = source;
  },
  // Disconnects the processing node; call only after start().
  stop: function () {
    this.source.disconnect(this.context.destination);
  },
  // Pulls count*channels interleaved samples from ondatarequested and
  // de-interleaves them into the per-channel output arrays.
  requestData: function (channelsData, count) {
    var channels = this.channels;
    var buffer = new Float32Array(count * channels);
    var e = { data: buffer, count: buffer.length };
    this.ondatarequested(e);
    for (var j = 0, p = 0; j < count; j++) {
      for (var i = 0; i < channels; i++)
        channelsData[i][j] = buffer[p++];
    }
  }
};
// True when some flavour of the Web Audio API is available.
// Fix: use strict `!==` consistently (the second check used loose `!=`).
WebAudioChannel.isSupported = (function() {
  return typeof AudioContext !== 'undefined' ||
         typeof webkitAudioContext !== 'undefined';
})();
// from https://wiki.mozilla.org/Audio_Data_API | |
// Audio output channel backed by the legacy Mozilla Audio Data API
// (https://wiki.mozilla.org/Audio_Data_API); Firefox-only.
function AudioDataChannel(sampleRate, channels) {
  this.channels = channels;
  this.sampleRate = sampleRate;
}
AudioDataChannel.prototype = {
  // Sets up a moz audio element and starts a 100ms polling loop that
  // keeps roughly 500ms of audio written ahead of the playback position.
  start: function () {
    var sampleRate = this.sampleRate;
    var channels = this.channels;
    var self = this;
    // Initialize the audio output.
    var audio = new Audio();
    audio.mozSetup(channels, sampleRate);
    // Total samples written so far (across all channels, interleaved).
    var currentWritePosition = 0;
    var prebufferSize = sampleRate * channels / 2; // buffer 500ms
    // Samples mozWriteAudio could not accept last tick, plus the index
    // of the first still-unwritten sample within them.
    var tail = null, tailPosition;
    // The function called with regular interval to populate
    // the audio output buffer.
    this.interval = setInterval(function() {
      var written;
      // Check if some data was not written in previous attempts.
      if(tail) {
        written = audio.mozWriteAudio(tail.subarray(tailPosition));
        currentWritePosition += written;
        tailPosition += written;
        if(tailPosition < tail.length) {
          // Still not all of the tail was written: keep it...
          return; // ... and exit the function.
        }
        tail = null;
      }
      // Check whether we need to add some data to the audio output.
      var currentPosition = audio.mozCurrentSampleOffset();
      var available = currentPosition + prebufferSize - currentWritePosition;
      available -= available % channels; // align to channels count
      if(available > 0) {
        // Request some sound data from the callback function.
        var soundData = new Float32Array(available);
        self.requestData(soundData, available);
        // Writing the data.
        written = audio.mozWriteAudio(soundData);
        if(written < soundData.length) {
          // Not all the data was written, saving the tail.
          tail = soundData;
          tailPosition = written;
        }
        currentWritePosition += written;
      }
    }, 100);
  },
  // Stops the polling loop; already-written audio may keep playing.
  stop: function () {
    clearInterval(this.interval);
  },
  // Pulls `count` interleaved samples into `data` from the
  // consumer-assigned ondatarequested callback.
  requestData: function (data, count) {
    this.ondatarequested({data: data, count: count});
  }
};
// True when the Mozilla Audio Data API is available.
// Fix: guard the Audio constructor with typeof — the original threw a
// ReferenceError in environments without `Audio` (e.g. workers, Node)
// instead of reporting "not supported".
AudioDataChannel.isSupported = (function () {
  return typeof Audio !== 'undefined' && 'mozSetup' in (new Audio());
})();
// Build ~1s of a 440 Hz test tone at the source rate; the second half of
// the buffer is deliberately left silent, so looped playback alternates
// tone and silence.
var sourceRate = 48000;
var a = new Float32Array(100000);
for (var i = 0; i < 50000; i++) {
  a[i] = Math.sin(Math.PI * 2 / sourceRate * i * 440) / 4;
}
var ai = 0; // current read position within `a`
var channels = 1;
var audio;
if (WebAudioChannel.isSupported)
  audio = new WebAudioChannel(sourceRate, channels);
else if (AudioDataChannel.isSupported)
  audio = new AudioDataChannel(sourceRate, channels);
else
  throw new Error('unsupported'); // throw an Error object, not a bare string
audio.ondatarequested = function (e) {
  // Copy samples into the output buffer, looping over the tone buffer.
  // (Removed an unused `var channels = e.data;` that shadowed the outer
  // `channels` with a misleading value.)
  for (var j = 0; j < e.count; j++) {
    e.data[j] = a[ai++];
    // Fix: was `ai > a.length`, which let one out-of-bounds read through
    // (a[a.length] is undefined → NaN sample) before wrapping.
    if (ai >= a.length) ai = 0;
  }
};
function playSound() {
  audio.start();
}
</script>
</head>
<body>
<button onclick="playSound();">test</button>
</body>
</html>
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment