<!-- test-av-record-client.html -->

<!-- RECORD THE AUDIOVISUAL EFFECT, SHOWING CONTINUAL PROGRESS FROM ALL COMPONENTS -->

<h2>Load and process audio, render frames, send frames to server, create video.</h2>

<style>
  .progressContainer {
    display: -webkit-flex;
    display: flex;
    width: 100%;
    padding: 10px;
    -webkit-justify-content: space-around;
    justify-content: space-around;
  }

  .progressItem {
    width: 33.33%;
  }

  .hidden {
    display: none;
  }
</style>

<div id="selectorForm" class="progressContainer"> |
|
<p>Select an audio file: |
|
<input type="file" id="fileSelector" name="file" /></p> |
|
</div> |
|
|
|
<div class="progressContainer"> |
|
<div class="progressItem"><b>[PAGE]</b></div> |
|
<div class="progressItem"><b>[WORKER]</b></div> |
|
<div class="progressItem"><b>[SERVER]</b></div> |
|
</div> |
|
|
|
<div id="progress" class="progressContainer"></div> |
|
|
|
<script src="https://rawgithub.com/mrdoob/three.js/master/build/three.js"></script> |
|
<script> |
|
|
|
// Frame rate
const FPS = 48;
var frameFreq = 1 / FPS; // duration of one frame, in seconds

// A modulo function that always returns a non-negative result,
// used for object positioning
Number.prototype.mod = function (n) {
  return ((this % n) + n) % n;
};
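
// For example, (-5).mod(3) === 1, whereas the native operator
// gives -5 % 3 === -2; the wrapped version keeps angles and
// offsets in the positive range.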

// Time the overall process duration
var processStart;
var processEnd;
var duration;

// Create scene and renderer
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(75, 1920 / 1080, 0.1, 1000);
var geometry = new THREE.BoxGeometry(1, 1, 1);
var material = new THREE.MeshNormalMaterial();
var cube = new THREE.Mesh(geometry, material);
var renderer = new THREE.WebGLRenderer();
var canvas = renderer.domElement;
scene.add(cube);
camera.position.z = 5;
renderer.setSize(1920, 1080);
renderer.render(scene, camera);

// Create a web worker to transmit the frames on another thread
var worker;
if (typeof Worker !== 'undefined') {
  worker = new Worker('test-av-worker.js');
  worker.addEventListener('message', function (e) {
    var message = e.data;
    displayMessage(message.data, message.sender);
  }, false);
} else {
  // The progress divs are not built yet at this point in the script,
  // so report the failure directly rather than via displayMessage()
  throw new Error('No web worker support');
}
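
// test-av-worker.js is not included in this file. Inferring from the
// messages this page posts ({type: 'audio' | 'frame' | 'done'}) and the
// {data, sender} replies it expects, a minimal compatible worker might
// look like the sketch below. upload() and its endpoint paths are
// hypothetical placeholders, not part of the original code:
//
//   self.onmessage = function (e) {
//     var message = e.data;
//     if (message.type === 'audio') {
//       upload('/audio', message.filename, message.data);  // the audio File
//     } else if (message.type === 'frame') {
//       upload('/frame', message.ordinal, message.data);   // a PNG data URL
//     } else if (message.type === 'done') {
//       upload('/render');                                 // assemble the video
//     }
//     self.postMessage({ sender: 'WORKER', data: 'Handled ' + message.type });
//   };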

// Audio value object.
// Values extracted on each audio sample that affect the rendered scene.
var audioVO = {
  context: null,
  sourceNode: null,
  javascriptNode: null,
  splitter: null,
  analyser: null,
  analyser2: null,
  analyserFreq: null,
  audio_freqArray: [],
  audio_lAverage: 0,
  audio_rAverage: 0,
  audioVolume: 0,
  audioBass: 0,
  audioHigh: 0,
  audioMid: 0
};

// Array of audioVOs, to be extracted during playback
var audioVOs = [];

// The loaded audio file and its name
var audioFile = {};

// Handle audio file selection
var selector = document.getElementById('fileSelector');
selector.addEventListener('change', onFileSelect, false);
function onFileSelect(e) {
  var file = e.target.files[0];
  audioFile.name = file.name;
  audioFile.file = file;
  displayMessage('Loading ' + audioFile.name);
  reader.readAsArrayBuffer(file);
}

// Read the selected audio file
var reader = new FileReader();
reader.onload = function (e) {
  processStart = Date.now(); // Begin timing the overall process
  var selectorForm = document.getElementById('selectorForm');
  selectorForm.innerHTML = 'Creating video for: ' + audioFile.name + ' ...';
  audioFile.data = e.target.result;
  displayMessage(audioFile.name + ' loaded');
  setupAudioNodes();
  decodeAudio();
};

// Set up audio nodes for playback and analysis
function setupAudioNodes() {
  // If AudioContext is there, create the AudioVO and initialize its AudioContext
  window.AudioContext = window.AudioContext || window.webkitAudioContext;
  if (!window.AudioContext) {
    displayMessage('No AudioContext found!');
  } else {
    // Set up the audio context
    audioVO.context = new AudioContext();

    // Set up a javascript node
    audioVO.javascriptNode = audioVO.context.createScriptProcessor(512, 1, 1);

    // Set up channel and frequency analysers
    audioVO.analyser = audioVO.context.createAnalyser();
    audioVO.analyser.smoothingTimeConstant = 0.5;
    audioVO.analyser.fftSize = 1024;

    audioVO.analyser2 = audioVO.context.createAnalyser();
    audioVO.analyser2.smoothingTimeConstant = 0.5;
    audioVO.analyser2.fftSize = 1024;

    audioVO.analyserFreq = audioVO.context.createAnalyser();
    audioVO.analyserFreq.smoothingTimeConstant = 0.3;
    audioVO.analyserFreq.fftSize = 256;

    // Create a buffer source node and splitter
    audioVO.sourceNode = audioVO.context.createBufferSource();
    audioVO.splitter = audioVO.context.createChannelSplitter();

    // Connect the buffer source node to the frequency analyser and splitter
    audioVO.sourceNode.connect(audioVO.splitter);
    audioVO.sourceNode.connect(audioVO.analyserFreq);
    audioVO.sourceNode.onended = onPlaybackComplete;

    // Connect outputs from the splitter to the channel analysers
    audioVO.splitter.connect(audioVO.analyser, 0, 0);
    audioVO.splitter.connect(audioVO.analyser2, 1, 0);

    // Connect the analysers to the javascript node
    audioVO.analyser.connect(audioVO.javascriptNode);
    audioVO.analyser2.connect(audioVO.javascriptNode);

    // Connect the javascript node to the destination; without this,
    // onaudioprocess never fires in most browsers, and
    // onPlaybackComplete expects to disconnect it
    audioVO.javascriptNode.connect(audioVO.context.destination);

    // Connect the source node to the destination
    audioVO.sourceNode.connect(audioVO.context.destination);
  }
}

// Decode the loaded audio
function decodeAudio() {
  displayMessage('Decoding audio...');
  audioVO.context.decodeAudioData(
    audioFile.data,
    function (buffer) {
      displayMessage('Audio decoded');

      // Send the original audio file to the worker
      displayMessage('Sending audio file to worker');
      var message = {
        type: 'audio',
        filename: audioFile.name,
        data: audioFile.file
      };
      worker.postMessage(message);

      // Start playback of the decoded audio
      audioVO.sourceNode.buffer = buffer;
      playback();
    },
    function (err) {
      displayMessage('Audio decode error: ' + err);
    }
  );
}

// Begin audio playback and processing
var offset;
function playback() {
  displayMessage('Starting audio playback');

  // Start audio playback
  audioVO.sourceNode.start(0);

  // Set the audio process callback
  audioVO.javascriptNode.onaudioprocess = process;
}

// Persistent values used by the audio processing callback
var currentTime,
    targetTime,
    currentSecond,
    currentFrame,
    currentFrameSet,
    snapshot,
    seconds = [];

// Audio processing callback
//
// Collects an array of arrays: seconds and snapshots.
// We don't know the duration of the audio in advance, so we
// track each second, collecting an array of snapshots, each
// taken at its calculated offset within the second. Since there
// could be hiccups in timing, and we have no assurance that
// we'll be called exactly on time for each snapshot, we take a
// snapshot (in audioVO) each time we're called, and when we
// reach (or pass) the next target frame time, we use the last
// one taken.
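//
// For example, at 48 fps the collected structure looks like:
//
//   seconds = [
//     [snap, snap, ...],   // up to 48 snapshots for second 0
//     [snap, snap, ...],   // up to 48 snapshots for second 1
//     ...
//   ];
//
// where individual slots may be undefined if no onaudioprocess
// callback landed close enough to that frame's target time;
// postProcessAudio() fills those gaps later.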
function process() {

  // On the first call, get the offset since the context was created
  if (!offset) offset = audioVO.context.currentTime;

  // Get the adjusted time in seconds since we started playback
  currentTime = audioVO.context.currentTime - offset;

  // Get the array of frames for the current second
  currentSecond = Math.floor(currentTime);
  if (seconds.length > currentSecond) {
    currentFrameSet = seconds[currentSecond];
  } else {
    currentFrameSet = new Array(FPS);
    seconds.push(currentFrameSet);
    currentFrame = 0;
  }

  // Calculate the target time for this frame
  // Frame frequency of 0.03333 yields 30 fps
  // Frame frequency of 0.02083 yields 48 fps
  // Frame frequency of 0.01666 yields 60 fps
  targetTime = currentSecond + (currentFrame * frameFreq);

  // Have we passed the target time?
  // If so, store the last snapshot taken for the target.
  // (On the very first call no snapshot exists yet, so skip it.)
  if (currentTime > targetTime && snapshot) {
    displayMessage('Processing audio for second: ' + snapshot.second + ' / frame: ' + snapshot.frame);
    currentFrameSet[currentFrame] = snapshot;
    currentFrame++;
  }

  // Get the average volume for the first channel
  var array = new Uint8Array(audioVO.analyser.frequencyBinCount);
  audioVO.analyser.getByteFrequencyData(array);
  var average = getAverageVolume(array);

  // Get the average volume for the second channel
  var array2 = new Uint8Array(audioVO.analyser2.frequencyBinCount);
  audioVO.analyser2.getByteFrequencyData(array2);
  var average2 = getAverageVolume(array2);

  // Get the frequency spectrum
  var freqArray = new Uint8Array(audioVO.analyserFreq.frequencyBinCount);
  audioVO.analyserFreq.getByteFrequencyData(freqArray);

  // Load the audioVO with the actionable values.
  // The bin ranges split the 128-bin spectrum into bass/mid/high;
  // the divisors are empirical scaling factors for the effect.
  audioVO.audio_lAverage = average;
  audioVO.audio_rAverage = average2;
  audioVO.audioVolume = (average + average2) / 2 / 100;
  audioVO.audio_freqArray = freqArray;
  audioVO.audioBass = getAverageVolume(freqArray.slice(0, 7)) / 200;
  audioVO.audioMid = getAverageVolume(freqArray.slice(8, 65)) / 125;
  audioVO.audioHigh = getAverageVolume(freqArray.slice(65, 128)) / 75;

  // Snapshot just the actionable values and timing info
  snapshot = {
    second: currentSecond,
    frame: currentFrame,
    currentTime: currentTime,
    targetTime: targetTime,
    audio_lAverage: audioVO.audio_lAverage,
    audio_rAverage: audioVO.audio_rAverage,
    audioVolume: audioVO.audioVolume,
    audioBass: audioVO.audioBass,
    audioHigh: audioVO.audioHigh,
    audioMid: audioVO.audioMid
  };

  // Uncomment to determine how often this method is called
  // console.log("processing " + currentSecond);

  // Inner private function to get the average volume of a bin array
  function getAverageVolume(array) {
    var values = 0;
    var average;
    var length = array.length;

    // Sum all the frequency amplitudes
    for (var i = 0; i < length; i++) {
      values += array[i];
    }

    average = values / length;
    return average;
  }
}

// The audio playback complete handler
function onPlaybackComplete() {
  displayMessage('Audio playback complete');
  audioVO.sourceNode.stop(0);
  audioVO.sourceNode.disconnect(audioVO.context.destination);
  audioVO.javascriptNode.disconnect(audioVO.context.destination);
  audioVO.javascriptNode.onaudioprocess = null;
  postProcessAudio();
}

// Post-process the audio snapshots
//
// Construct a single flat list of snapshots from the
// data we grabbed during playback.
//
// Even though we did our best to grab audio
// as close as possible to each target frame in
// each second, there could still be gaps that
// are longer than one frame in length, leading
// to seconds without enough frame data.
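//
// For example, if second 3 was captured as [s0, s1, undefined, s3, ...],
// the undefined slot is filled with the most recent snapshot (s1), so
// the flattened audioVOs array ends up with exactly FPS entries per
// second of audio.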
function postProcessAudio() {
  displayMessage('Post processing audio...');

  // Make sure we have an audio snapshot for every frame
  var i, j, message, snapshots, snapshot;
  var previous = seconds[0][0];
  for (i = 0; i < seconds.length; i++) {
    snapshots = seconds[i];
    for (j = 0; j < FPS; j++) {
      snapshot = snapshots[j];
      if (snapshot === undefined) {
        message = 'Snapshot missing for second: ' + i;
        message += ', frame: ' + j;
        displayMessage(message);
        message = 'Using snapshot for second: ' + previous.second;
        message += ', frame: ' + previous.frame;
        displayMessage(message);
        audioVOs.push(previous);
      } else {
        audioVOs.push(snapshot);
        previous = snapshot;
      }
    }
  }

  // We can't sync onaudioprocess with sourceNode.start(),
  // so remove any initial zero-volume frames
  while (audioVOs.length && audioVOs[0].audioVolume === 0) {
    audioVOs.shift();
  }

  // Audio data has been cleaned up, now build the canvas
  startRender();
}

function startRender() {
  displayMessage('Start rendering ' + audioVOs.length + ' frames...');
  document.body.appendChild(canvas);
  render();
}

// The render loop
var message = null;
var frame = 0;
var clones = [];
var i;
var render = function () {

  // Create frames for all the audio samples
  if (frame < audioVOs.length) {
    displayMessage('Rendering frame ' + frame);

    // Render and increment the frame
    requestAnimationFrame(render);
    renderer.render(scene, camera);

    // Get the pre-extracted audio values for this frame
    audioVO = audioVOs[frame];

    // Tie the cube location to audio low, mid, high and l/r averages
    // Also, rock it around the clock
    var radius = 2;
    var angle = frame.mod(360);
    var newX = (radius + audioVO.audioHigh) * Math.cos(angle / audioVO.audio_lAverage);
    var newY = (radius + audioVO.audioMid) * Math.sin(angle / audioVO.audio_rAverage);
    var newZ = (radius - audioVO.audioBass) / Math.tan(angle * audioVO.audioVolume);
    cube.position.set(newX, newY, newZ);

    // Tie rotation to the channel averages
    cube.rotation.x += audioVO.audio_lAverage;
    cube.rotation.y += audioVO.audio_rAverage;

    // Tie the cube scale to the audio bass
    var scale = frame.mod(audioVO.audioBass);
    var newScale = scale / audioVO.audioBass;
    cube.scale.set(newScale, newScale, newScale);

    // Throw the occasional clone
    if (Math.floor(Math.random() * 2)) {
      var clone = new THREE.Mesh(geometry, material);
      clone.scale.set(newScale / 5, newScale / 5, newScale / 5);
      clone.position.set(newX, newY, newZ);
      clone.rotation.x -= audioVO.audio_rAverage;
      clone.rotation.y -= audioVO.audio_lAverage;
      clone.velocity = {
        x: (Math.random() * audioVO.audioBass) / 2,
        y: (Math.random() * audioVO.audioMid) / 2,
        z: (Math.random() * audioVO.audioHigh) / 2
      };
      scene.add(clone);
      clones.push(clone);
    }

    // Adjust the clones (use i + 1 so the first clone doesn't divide by zero)
    for (i = 0; i < clones.length; i++) {
      clone = clones[i];
      clone.position.z -= (clone.velocity.z - angle.mod(-newZ / (i + 1)));
      clone.position.y += (clone.velocity.y + angle.mod(-newY / (i + 1)));
      clone.position.x -= (clone.velocity.x - angle.mod(newX / (i + 1)));
      newScale = audioVO.audioVolume.mod(i + 1) / 2;
      clone.scale.set(newScale, newScale, newScale);
    }

    // Kick the camera rotation based on volume
    camera.rotation.z -= audioVO.audioVolume / 2;

    // Send the rendered frame to the web worker.
    // toDataURL() is synchronous, and a 1920x1080 PNG data URL is
    // large, so this is the most expensive step of the loop.
    message = {
      type: 'frame',
      ordinal: frame,
      data: canvas.toDataURL('image/png')
    };

    worker.postMessage(message);
    frame++;

  } else {

    // Notify the worker that we're done
    message = { type: 'done' };
    worker.postMessage(message);
    displayMessage('Done rendering frames');

  }
};

// Progress display divs
const PAGE = 'PAGE';
const WORKER = 'WORKER';
const SERVER = 'SERVER';
const COMPLETE = 'COMPLETE';
var progressDiv = document.getElementById('progress');

var pageDiv = document.createElement('div');
pageDiv.className = 'progressItem';
progressDiv.appendChild(pageDiv);

var workerDiv = document.createElement('div');
workerDiv.className = 'progressItem';
progressDiv.appendChild(workerDiv);

var serverDiv = document.createElement('div');
serverDiv.className = 'progressItem';
progressDiv.appendChild(serverDiv);

var progressDivs = {
  PAGE: pageDiv,
  WORKER: workerDiv,
  SERVER: serverDiv
};

// Display messages on the page
function displayMessage(message, sender) {
  if (sender === COMPLETE) {
    sender = PAGE;
    processEnd = Date.now();
    duration = processEnd - processStart;
    var min = (duration / 1000 / 60) << 0,
        sec = ((duration / 1000) % 60).toFixed(1);
    message = 'Total build time: ' + min + ':' + sec;
  }
  if (!sender) sender = PAGE;
  progressDivs[sender].innerHTML = message;
}

</script>
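
<!--
  The server side is not shown in this file. One plausible sketch, assuming
  a Node.js server that has already written the uploaded audio and the
  numbered PNG frames to disk: shell out to ffmpeg to mux them into a
  video. The paths and filenames below are illustrative assumptions, not
  the actual server implementation.

    var exec = require('child_process').exec;

    // frames/frame-%d.png and audio.mp3 were saved by the upload routes
    exec('ffmpeg -framerate 48 -i frames/frame-%d.png -i audio.mp3 ' +
         '-c:v libx264 -pix_fmt yuv420p -shortest output.mp4',
      function (err) {
        if (err) console.error(err);
        else console.log('Video created: output.mp4');
      });
-->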