This demo builds on previous gists in this series to render an audio-modulated WebGL animation and output a 48 FPS, 1080p HD video, utilizing:

  • HTML5's Web Audio API for analysing an audio file.
  • HTML5's Web Worker API for offloading communications between the web page and the server to a separate thread.
  • Socket.io for sending messages (including image and audio files) between client and server.
  • Node.js for the server process, which stores generated images and uploaded audio and creates the final video.
  • FFMpeg for the actual video creation.
  • Fluent-ffmpeg for making the configuration and execution of FFMpeg commands easy peasy.
  • @ffmpeg-installer/ffmpeg for installing the correct version of FFMpeg for the hardware this demo runs on.
  • mkdirp for creating the folder path for each client's data, including any missing nodes in the path.

To achieve this, there are three scripts: one in the page, one on the web worker thread, and one on the Node.js server. The messages they pass between them are sketched just below.
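For orientation, these are the three message shapes that travel from page to worker to server (taken from the scripts below). The worker relays each one to the server as a socket.io event of the same name, stripping the base64 header from frame images along the way:

// A rendered frame as a PNG data URL; the server zero-pads
// the ordinal into the filename so frames sort in order on disk
{ type: 'frame', ordinal: frame, data: canvas.toDataURL('image/png') }
// The selected audio file; the server derives the .mp4 name from filename
{ type: 'audio', filename: audioFile.name, data: audioFile.file }
// End of stream; tells the server to assemble the video with FFMpeg
{ type: 'done' }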

Read more about the project here:

{
  "name": "test-av-demo",
  "version": "1.0.0",
  "description": "Accepts a stream of images and audio via socket and renders them into a video",
  "main": "test-av-server.js",
  "dependencies": {
    "@ffmpeg-installer/ffmpeg": "^1.0.9",
    "fluent-ffmpeg": "^2.1.0",
    "mkdirp": "^0.5.1",
    "socket.io": "^1.5.0"
  },
  "author": "Cliff Hall",
  "license": "MIT",
  "homepage": "http://cliffordhall.com"
}
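With this package.json in place, npm install fetches the dependencies and node test-av-server.js starts the socket server, which listens on port 3000 (see test-av-server.js at the end of this gist).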
<!-- test-av-preview-client.html -->
<!-- PREVIEW THE AUDIOVISUAL EFFECT -->
<div>
  Select an audio file:
  <input type="file" id="fileSelector" name="file" />
</div>
<script src="https://rawgithub.com/mrdoob/three.js/master/build/three.js"></script>
<script>
  // A modulo function for object positioning
  Number.prototype.mod = function (n) {
    return ((this % n) + n) % n;
  };
  // Create scene and renderer
  var scene = new THREE.Scene();
  var camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
  var geometry = new THREE.BoxGeometry(1, 1, 1);
  var material = new THREE.MeshNormalMaterial();
  var cube = new THREE.Mesh(geometry, material);
  var renderer = new THREE.WebGLRenderer();
  scene.add(cube);
  camera.position.z = 12;
  renderer.setSize(window.innerWidth, window.innerHeight);
  // Renderer's canvas
  var canvas = renderer.domElement;
  document.body.appendChild(canvas);
  // Audio value object.
  // Values extracted on each audio sample that affect the rendered scene.
  var audioVO = {
    context: null,
    sourceNode: null,
    javascriptNode: null,
    splitter: null,
    analyser: null,
    analyser2: null,
    analyserFreq: null,
    audio_freqArray: [],
    audio_lAverage: 0,
    audio_rAverage: 0,
    audioVolume: 0,
    audioBass: 0,
    audioHigh: 0,
    audioMid: 0
  };
  // The loaded audio file
  var audioFile;
  // Handle audio file selection
  var selector = document.getElementById('fileSelector');
  selector.addEventListener('change', onFileSelect, false);
  function onFileSelect(e) {
    var file = e.target.files[0];
    reader.readAsArrayBuffer(file);
  }
  // Read the selected audio file
  var reader = new FileReader();
  reader.onload = function (e) {
    audioFile = e.target.result;
    setupAudioNodes();
    decodeAudio();
  };
  // Set up audio nodes for playback and analysis
  function setupAudioNodes() {
    // If AudioContext is there, create the AudioVO and initialize its AudioContext
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    if (!window.AudioContext) {
      console.log('No AudioContext found.');
    } else {
      // Set up the audio context
      audioVO.context = new AudioContext();
      // Set up a javascript node
      audioVO.javascriptNode = audioVO.context.createScriptProcessor(512, 1, 1);
      // Set up channel and frequency analysers
      audioVO.analyser = audioVO.context.createAnalyser();
      audioVO.analyser.smoothingTimeConstant = 0.5;
      audioVO.analyser.fftSize = 1024;
      audioVO.analyser2 = audioVO.context.createAnalyser();
      audioVO.analyser2.smoothingTimeConstant = 0.5;
      audioVO.analyser2.fftSize = 1024;
      audioVO.analyserFreq = audioVO.context.createAnalyser();
      audioVO.analyserFreq.smoothingTimeConstant = 0.3;
      audioVO.analyserFreq.fftSize = 256;
      // Create a buffer source node and splitter
      audioVO.sourceNode = audioVO.context.createBufferSource();
      audioVO.splitter = audioVO.context.createChannelSplitter();
      // Connect buffer source node to frequency analyser and splitter
      audioVO.sourceNode.connect(audioVO.splitter);
      audioVO.sourceNode.connect(audioVO.analyserFreq);
      audioVO.sourceNode.onended = onPlaybackComplete;
      // Connect outputs from splitter to channel analysers
      audioVO.splitter.connect(audioVO.analyser, 0, 0);
      audioVO.splitter.connect(audioVO.analyser2, 1, 0);
      // Connect the analysers to the javascript node
      audioVO.analyser.connect(audioVO.javascriptNode);
      audioVO.analyser2.connect(audioVO.javascriptNode);
      // Connect the source node to the destination
      audioVO.sourceNode.connect(audioVO.context.destination);
    }
  }
  // Decode the loaded audio
  function decodeAudio() {
    audioVO.context.decodeAudioData(
      audioFile,
      function (buffer) {
        console.log('Audio Decoded');
        audioVO.sourceNode.buffer = buffer;
        begin();
      },
      function (err) {
        console.log('Audio Decode Error: ' + err);
      }
    );
  }
  // Begin audio playback
  function playback() {
    // Start audio playback
    audioVO.sourceNode.start(0);
    // Set audio process callback
    audioVO.javascriptNode.onaudioprocess = process;
  }
  // The audio processing callback
  function process() {
    // Get average for the first channel
    var array = new Uint8Array(audioVO.analyser.frequencyBinCount);
    audioVO.analyser.getByteFrequencyData(array);
    var average = getAverageVolume(array);
    // Get average for the second channel
    var array2 = new Uint8Array(audioVO.analyser2.frequencyBinCount);
    audioVO.analyser2.getByteFrequencyData(array2);
    var average2 = getAverageVolume(array2);
    // Get frequency spectrum
    var freqArray = new Uint8Array(audioVO.analyserFreq.frequencyBinCount);
    audioVO.analyserFreq.getByteFrequencyData(freqArray);
    // Load the VO with the actionable values
    audioVO.audio_lAverage = average;
    audioVO.audio_rAverage = average2;
    audioVO.audioVolume = (average + average2) / 2 / 100;
    audioVO.audio_freqArray = freqArray;
    audioVO.audioBass = getAverageVolume(freqArray.slice(0, 7)) / 200;
    audioVO.audioMid = getAverageVolume(freqArray.slice(8, 65)) / 125;
    audioVO.audioHigh = getAverageVolume(freqArray.slice(65, 128)) / 75;
    // Inner private function to get average volume
    function getAverageVolume(array) {
      var values = 0;
      var average;
      var length = array.length;
      // Sum all the frequency amplitudes
      for (var i = 0; i < length; i++) {
        values += array[i];
      }
      average = values / length;
      return average;
    }
  }
  // The audio playback complete handler
  function onPlaybackComplete() {
    done = true;
    audioVO.sourceNode.stop(0);
    audioVO.sourceNode.disconnect(audioVO.context.destination);
    audioVO.javascriptNode.disconnect(audioVO.context.destination);
    audioVO.javascriptNode.onaudioprocess = null;
  }
  // The render loop
  var done = false;
  var frame = 0;
  var clones = [];
  var i;
  function render() {
    if (!done) {
      // Tie cube location to audio low, mid, high and l/r averages
      // Also, rock it around the clock
      var radius = 2;
      var angle = frame.mod(360);
      var newX = (radius + audioVO.audioHigh) * Math.cos(angle / audioVO.audio_lAverage);
      var newY = (radius + audioVO.audioMid) * Math.sin(angle / audioVO.audio_rAverage);
      var newZ = (radius - audioVO.audioBass) / Math.tan(angle * audioVO.audioVolume);
      cube.position.set(newX, newY, newZ);
      // Tie rotation to averages
      cube.rotation.x += audioVO.audio_lAverage;
      cube.rotation.y += audioVO.audio_rAverage;
      // Tie cube scale to audio bass
      var scale = (frame).mod(audioVO.audioBass);
      var newScale = scale / audioVO.audioBass;
      cube.scale.set(newScale, newScale, newScale);
      // Throw the occasional clone
      if (Math.floor(Math.random() * 2)) {
        var clone = new THREE.Mesh(geometry, material);
        clone.scale.set(newScale / 5, newScale / 5, newScale / 5);
        clone.position.set(newX, newY, newZ);
        clone.rotation.x -= audioVO.audio_rAverage;
        clone.rotation.y -= audioVO.audio_lAverage;
        clone.velocity = {
          x: (Math.random() * audioVO.audioBass) / 2,
          y: (Math.random() * audioVO.audioMid) / 2,
          z: (Math.random() * audioVO.audioHigh) / 2
        };
        scene.add(clone);
        clones.push(clone);
      }
      // Adjust the clones
      for (i = 0; i < clones.length; i++) {
        clone = clones[i];
        clone.position.z -= (clone.velocity.z - angle.mod(-newZ / i));
        clone.position.y += (clone.velocity.y + angle.mod(-newY / i));
        clone.position.x -= (clone.velocity.x - angle.mod(newX / i));
        newScale = audioVO.audioVolume.mod(i) / 2;
        clone.scale.set(newScale, newScale, newScale);
      }
      // Kick the camera rotation based on volume
      camera.rotation.z -= audioVO.audioVolume / 2;
      // Render and increment frame
      requestAnimationFrame(render);
      renderer.render(scene, camera);
      frame++;
    } else {
      // Clear canvas after final frame is rendered
      renderer.clear();
    }
  }
  // Start the preview
  function begin() {
    playback();
    render();
  }
</script>
<!-- test-av-record-client.html -->
<!-- RECORD THE AUDIOVISUAL EFFECT, SHOWING CONTINUAL PROGRESS FROM ALL COMPONENTS -->
<h2>Load and process audio, render frames, send frames to server, create video.</h2>
<style>
  .progressContainer {
    display: flex;
    display: -webkit-flex;
    width: 100%;
    padding: 10px;
    -webkit-justify-content: space-around;
    justify-content: space-around;
  }
  .progressItem {
    width: 33.33%;
  }
  .hidden {
    display: none;
  }
</style>
<div id="selectorForm" class="progressContainer">
<p>Select an audio file:
<input type="file" id="fileSelector" name="file" /></p>
</div>
<div class="progressContainer">
<div class="progressItem"><b>[PAGE]</b></div>
<div class="progressItem"><b>[WORKER]</b></div>
<div class="progressItem"><b>[SERVER]</b></div>
</div>
<div id="progress" class="progressContainer"></div>
<script src="https://rawgithub.com/mrdoob/three.js/master/build/three.js"></script>
<script>
  // Frame rate
  const FPS = 48;
  var frameFreq = (1000 / FPS) / 1000; // seconds per frame
  // A modulo function for object positioning
  Number.prototype.mod = function (n) {
    return ((this % n) + n) % n;
  };
  // Time the overall process duration
  var processStart;
  var processEnd;
  var duration;
  // Create scene and renderer
  var scene = new THREE.Scene();
  var camera = new THREE.PerspectiveCamera(75, 1920 / 1080, 0.1, 1000);
  var geometry = new THREE.BoxGeometry(1, 1, 1);
  var material = new THREE.MeshNormalMaterial();
  var cube = new THREE.Mesh(geometry, material);
  var renderer = new THREE.WebGLRenderer();
  var canvas = renderer.domElement;
  scene.add(cube);
  camera.position.z = 5;
  renderer.setSize(1920, 1080);
  renderer.render(scene, camera);
  // Create a web worker to transmit the frames on another thread
  var worker;
  if (typeof (Worker) !== 'undefined') {
    worker = new Worker('test-av-worker.js');
    worker.addEventListener('message', function (e) {
      var message = e.data;
      displayMessage(message.data, message.sender);
    }, false);
  } else {
    var message = 'No web worker support';
    displayMessage(message);
    throw new Error(message);
  }
  // Audio value object.
  // Values extracted on each audio sample that affect the rendered scene.
  var audioVO = {
    context: null,
    sourceNode: null,
    javascriptNode: null,
    splitter: null,
    analyser: null,
    analyser2: null,
    analyserFreq: null,
    audio_freqArray: [],
    audio_lAverage: 0,
    audio_rAverage: 0,
    audioVolume: 0,
    audioBass: 0,
    audioHigh: 0,
    audioMid: 0
  };
  // Array of audioVOs, to be extracted during playback
  var audioVOs = [];
  // The loaded audio file and its name
  var audioFile = {};
  // Handle audio file selection
  var selector = document.getElementById('fileSelector');
  selector.addEventListener('change', onFileSelect, false);
  function onFileSelect(e) {
    var file = e.target.files[0];
    audioFile.name = file.name;
    audioFile.file = file;
    displayMessage('Loading ' + audioFile.name);
    reader.readAsArrayBuffer(file);
  }
  // Read the selected audio file
  var reader = new FileReader();
  reader.onload = function (e) {
    processStart = Date.now(); // Begin timing the overall process
    var selectorForm = document.getElementById('selectorForm');
    selectorForm.innerHTML = "Creating video for: " + audioFile.name + " ...";
    audioFile.data = e.target.result;
    displayMessage(audioFile.name + ' loaded');
    setupAudioNodes();
    decodeAudio();
  };
  // Set up audio nodes for playback and analysis
  function setupAudioNodes() {
    // If AudioContext is there, create the AudioVO and initialize its AudioContext
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    if (!window.AudioContext) {
      displayMessage('No AudioContext found!');
    } else {
      // Set up the audio context
      audioVO.context = new AudioContext();
      // Set up a javascript node
      audioVO.javascriptNode = audioVO.context.createScriptProcessor(512, 1, 1);
      // Set up channel and frequency analysers
      audioVO.analyser = audioVO.context.createAnalyser();
      audioVO.analyser.smoothingTimeConstant = 0.5;
      audioVO.analyser.fftSize = 1024;
      audioVO.analyser2 = audioVO.context.createAnalyser();
      audioVO.analyser2.smoothingTimeConstant = 0.5;
      audioVO.analyser2.fftSize = 1024;
      audioVO.analyserFreq = audioVO.context.createAnalyser();
      audioVO.analyserFreq.smoothingTimeConstant = 0.3;
      audioVO.analyserFreq.fftSize = 256;
      // Create a buffer source node and splitter
      audioVO.sourceNode = audioVO.context.createBufferSource();
      audioVO.splitter = audioVO.context.createChannelSplitter();
      // Connect buffer source node to frequency analyser and splitter
      audioVO.sourceNode.connect(audioVO.splitter);
      audioVO.sourceNode.connect(audioVO.analyserFreq);
      audioVO.sourceNode.onended = onPlaybackComplete;
      // Connect outputs from splitter to channel analysers
      audioVO.splitter.connect(audioVO.analyser, 0, 0);
      audioVO.splitter.connect(audioVO.analyser2, 1, 0);
      // Connect the analysers to the javascript node
      audioVO.analyser.connect(audioVO.javascriptNode);
      audioVO.analyser2.connect(audioVO.javascriptNode);
      // Connect the source node to the destination
      audioVO.sourceNode.connect(audioVO.context.destination);
    }
  }
  // Decode the loaded audio
  function decodeAudio() {
    displayMessage('Decoding audio...');
    audioVO.context.decodeAudioData(
      audioFile.data,
      function (buffer) {
        displayMessage('Audio decoded');
        // Send the decoded audio
        displayMessage('Sending audio file to worker');
        message = {
          type: 'audio',
          filename: audioFile.name,
          data: audioFile.file
        };
        worker.postMessage(message);
        // Start playback of audio
        audioVO.sourceNode.buffer = buffer;
        playback();
      },
      function (err) {
        displayMessage('Audio decode error: ' + err);
      }
    );
  }
  // Begin audio playback and processing
  var offset;
  function playback() {
    displayMessage('Starting audio playback');
    // Start audio playback
    audioVO.sourceNode.start(0);
    // Set audio process callback
    audioVO.javascriptNode.onaudioprocess = process;
  }
  // Persistent values used by the audio processing callback
  var currentTime,
      targetTime,
      currentSecond,
      currentFrame,
      currentFrameSet,
      snapshot,
      seconds = [];
  // Audio processing callback
  //
  // Collects an array of arrays: seconds and snapshots.
  // We don't know the duration of the audio, so we track
  // each second, collecting an array of snapshots,
  // each taken at its calculated offset within the second.
  // Since there could be hiccups in timing, and we have
  // no assurance that we'll be called exactly on time for
  // each snapshot, we take a snapshot (in audioVO) each
  // time we're called, and when we reach (or pass) the
  // next target frame time, we use the last one taken.
  function process() {
    // On first call, get offset since context was created
    if (!offset) offset = audioVO.context.currentTime;
    // Get the adjusted time in seconds since we started playback
    currentTime = audioVO.context.currentTime - offset;
    // Get the array of frames for the current second
    currentSecond = Math.floor(currentTime);
    if (seconds.length > currentSecond) {
      currentFrameSet = seconds[currentSecond];
    } else {
      currentFrameSet = new Array(FPS);
      seconds.push(currentFrameSet);
      currentFrame = 0;
    }
    // Calculate the target time for this frame
    // Frame frequency of 0.03333 yields 30 fps
    // Frame frequency of 0.02083 yields 48 fps
    // Frame frequency of 0.01666 yields 60 fps
    targetTime = currentSecond + (currentFrame * frameFreq);
    // Have we passed the target time?
    // If so, store the last snapshot for the target.
    if (currentTime > targetTime) {
      displayMessage('Processing audio for second: ' + snapshot.second + ' / frame: ' + snapshot.frame);
      currentFrameSet[currentFrame] = snapshot;
      currentFrame++;
    }
    // Get average for the first channel
    var array = new Uint8Array(audioVO.analyser.frequencyBinCount);
    audioVO.analyser.getByteFrequencyData(array);
    var average = getAverageVolume(array);
    // Get average for the second channel
    var array2 = new Uint8Array(audioVO.analyser2.frequencyBinCount);
    audioVO.analyser2.getByteFrequencyData(array2);
    var average2 = getAverageVolume(array2);
    // Get frequency spectrum
    var freqArray = new Uint8Array(audioVO.analyserFreq.frequencyBinCount);
    audioVO.analyserFreq.getByteFrequencyData(freqArray);
    // Load the audioVO with the actionable values
    audioVO.audio_lAverage = average;
    audioVO.audio_rAverage = average2;
    audioVO.audioVolume = (average + average2) / 2 / 100;
    audioVO.audio_freqArray = freqArray;
    audioVO.audioBass = getAverageVolume(freqArray.slice(0, 7)) / 200;
    audioVO.audioMid = getAverageVolume(freqArray.slice(8, 65)) / 125;
    audioVO.audioHigh = getAverageVolume(freqArray.slice(65, 128)) / 75;
    // Snapshot just the actionable values and timing info
    snapshot = {
      second: currentSecond,
      frame: currentFrame,
      currentTime: currentTime,
      targetTime: targetTime,
      audio_lAverage: audioVO.audio_lAverage,
      audio_rAverage: audioVO.audio_rAverage,
      audioVolume: audioVO.audioVolume,
      audioBass: audioVO.audioBass,
      audioHigh: audioVO.audioHigh,
      audioMid: audioVO.audioMid
    };
    // Uncomment to determine how often this method is called
    // console.log("processing " + currentSecond);
    // Inner private function to get average volume
    function getAverageVolume(array) {
      var values = 0;
      var average;
      var length = array.length;
      // Sum all the frequency amplitudes
      for (var i = 0; i < length; i++) {
        values += array[i];
      }
      average = values / length;
      return average;
    }
  }
  // The audio playback complete handler
  function onPlaybackComplete() {
    displayMessage('Audio playback complete');
    audioVO.sourceNode.stop(0);
    audioVO.sourceNode.disconnect(audioVO.context.destination);
    audioVO.javascriptNode.disconnect(audioVO.context.destination);
    audioVO.javascriptNode.onaudioprocess = null;
    postProcessAudio();
  }
  // Post-process the audio snapshots
  //
  // Construct a single list of snapshots from the
  // data we grabbed during playback.
  //
  // Even though we did our best to grab audio
  // as close as possible to each target frame in
  // each second, there could still be gaps that
  // are longer than one frame in length, leading
  // to seconds without enough frame data.
  function postProcessAudio() {
    displayMessage('Post processing audio...');
    // Make sure we have an audio snapshot for every frame
    var i, j, message, snapshots, snapshot;
    var previous = seconds[0][0];
    for (i = 0; i < seconds.length; i++) {
      snapshots = seconds[i];
      for (j = 0; j < FPS; j++) {
        snapshot = snapshots[j];
        if (snapshot === undefined) {
          message = 'Snapshot missing for second: ' + i;
          message += ', frame: ' + j;
          displayMessage(message);
          message = 'Using snapshot for second: ' + previous.second;
          message += ', frame: ' + previous.frame;
          displayMessage(message);
          audioVOs.push(previous);
        } else {
          audioVOs.push(snapshot);
          previous = snapshot;
        }
      }
    }
    // We can't sync onaudioprocess with sourceNode.start(),
    // so remove initial zero volume frames
    while (audioVOs[0].audioVolume === 0) {
      audioVOs.shift();
    }
    // Audio data has been cleaned up, now build the canvas
    startRender();
  }
  function startRender() {
    displayMessage('Start rendering ' + audioVOs.length + ' frames...');
    document.body.appendChild(canvas);
    render();
  }
  // The render loop
  var message = null;
  var frame = 0;
  var clones = [];
  var i;
  var render = function () {
    // Create frames for all the audio samples
    if (frame < audioVOs.length) {
      displayMessage("Rendering frame " + frame);
      // Render and increment frame
      requestAnimationFrame(render);
      renderer.render(scene, camera);
      // Get the pre-extracted audio values for this frame
      audioVO = audioVOs[frame];
      // Tie cube location to audio low, mid, high and l/r averages
      // Also, rock it around the clock
      var radius = 2;
      var angle = frame.mod(360);
      var newX = (radius + audioVO.audioHigh) * Math.cos(angle / audioVO.audio_lAverage);
      var newY = (radius + audioVO.audioMid) * Math.sin(angle / audioVO.audio_rAverage);
      var newZ = (radius - audioVO.audioBass) / Math.tan(angle * audioVO.audioVolume);
      cube.position.set(newX, newY, newZ);
      // Tie rotation to averages
      cube.rotation.x += audioVO.audio_lAverage;
      cube.rotation.y += audioVO.audio_rAverage;
      // Tie cube scale to audio bass
      var scale = (frame).mod(audioVO.audioBass);
      var newScale = scale / audioVO.audioBass;
      cube.scale.set(newScale, newScale, newScale);
      // Throw the occasional clone
      if (Math.floor(Math.random() * 2)) {
        var clone = new THREE.Mesh(geometry, material);
        clone.scale.set(newScale / 5, newScale / 5, newScale / 5);
        clone.position.set(newX, newY, newZ);
        clone.rotation.x -= audioVO.audio_rAverage;
        clone.rotation.y -= audioVO.audio_lAverage;
        clone.velocity = {
          x: (Math.random() * audioVO.audioBass) / 2,
          y: (Math.random() * audioVO.audioMid) / 2,
          z: (Math.random() * audioVO.audioHigh) / 2
        };
        scene.add(clone);
        clones.push(clone);
      }
      // Adjust the clones
      for (i = 0; i < clones.length; i++) {
        clone = clones[i];
        clone.position.z -= (clone.velocity.z - angle.mod(-newZ / i));
        clone.position.y += (clone.velocity.y + angle.mod(-newY / i));
        clone.position.x -= (clone.velocity.x - angle.mod(newX / i));
        newScale = audioVO.audioVolume.mod(i) / 2;
        clone.scale.set(newScale, newScale, newScale);
      }
      // Kick the camera rotation based on volume
      camera.rotation.z -= audioVO.audioVolume / 2;
      // Send the rendered frame to the web worker
      message = {
        type: 'frame',
        ordinal: frame,
        data: canvas.toDataURL('image/png')
      };
      worker.postMessage(message);
      frame++;
    } else {
      // Notify the worker that we're done
      message = { type: 'done' };
      worker.postMessage(message);
      displayMessage("Done rendering frames");
    }
  };
  // Progress display divs
  const PAGE = 'PAGE';
  const WORKER = 'WORKER';
  const SERVER = 'SERVER';
  const COMPLETE = 'COMPLETE';
  var progressDiv = document.getElementById('progress');
  var pageDiv = document.createElement("div");
  pageDiv.className = "progressItem";
  progressDiv.appendChild(pageDiv);
  var workerDiv = document.createElement("div");
  workerDiv.className = "progressItem";
  progressDiv.appendChild(workerDiv);
  var serverDiv = document.createElement("div");
  serverDiv.className = "progressItem";
  progressDiv.appendChild(serverDiv);
  var progressDivs = {
    "PAGE": pageDiv,
    "WORKER": workerDiv,
    "SERVER": serverDiv
  };
  // Display messages on the page
  function displayMessage(message, sender) {
    if (sender === COMPLETE) {
      sender = PAGE;
      processEnd = Date.now();
      duration = processEnd - processStart;
      var min = (duration / 1000 / 60) << 0, // whole minutes (bitshift truncates)
          sec = (duration / 1000) % 60;
      message = "Total build time: " + min + ':' + sec;
    }
    if (!sender) sender = PAGE;
    progressDivs[sender].innerHTML = message;
  }
</script>
// test-av-server.js
//
// HANDLE CLIENT CONNECTIONS,
// RECEIVING IMAGE AND AUDIO FILES,
// CREATING RESULTANT VIDEO
// Required modules
var fs = require('fs');
var mkdirp = require('mkdirp');
// Create the socket server
const PORT = 3000;
var socket = require('socket.io')(PORT);
console.log('Socket server listening on port: ' + PORT);
// Handle connections
socket.on('connection', function (client) {
  const FPS = 48;
  var folder, audioFile, videoFile, timemark, report;
  // Listen for frame and disconnect events
  client.on('frame', onData);
  client.on('audio', onData);
  client.on('done', onClientDone);
  client.on('disconnect', onDisconnect);
  // Create output folder for this client (synchronously,
  // so it exists before any frame or audio writes arrive)
  folder = "/var/tmp/test-av-server/" + client.id + "/";
  mkdirp.sync(folder);
  // Handle a frame or audio event from the client
  function onData(message, callback) {
    // Get filename
    var file, filename;
    if (message.type === 'frame') {
      console.log('Received frame: "' + message.ordinal + '" from client: ' + client.id);
      var zeroPadFrame = ("000000" + message.ordinal).slice(-6);
      filename = String("frame_" + zeroPadFrame + ".png");
      file = Buffer.from(message.data, 'base64');
    } else if (message.type === 'audio') {
      console.log('Received audio track from client: ' + client.id);
      audioFile = filename = message.filename;
      videoFile = audioFile.split(".")[0] + ".mp4";
      file = message.data;
    }
    // Create the file, logging any write error
    fs.writeFile(folder + filename, file.toString('binary'), 'binary', function (err) {
      if (err) console.log('Error writing ' + filename + ': ' + err.message);
    });
    // Acknowledge receipt
    callback();
  }
  // Client is done sending data, so create the movie
  function onClientDone(message, callback) {
    // Acknowledge receipt
    callback();
    // Configure FFMpeg
    var ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
    var ffmpeg = require('fluent-ffmpeg');
    ffmpeg.setFfmpegPath(ffmpegPath);
    var command = ffmpeg();
    // Bust a vid
    createVideo(command);
  }
  // Create the video
  function createVideo(command) {
    // Execute the command
    command
      .on('end', onEnd)
      .on('progress', onProgress)
      .on('error', onError)
      .input(folder + 'frame_%06d.png')
      .inputFPS(FPS)
      .videoFilter(["movie=soa-watermark.png [watermark]; [in][watermark] overlay=10:main_h-overlay_h-10 [out]"])
      .input(folder + audioFile)
      .output(folder + videoFile)
      .outputFPS(FPS)
      .run();
  }
  // Report video creation progress
  function onProgress(progress) {
    if (progress.timemark != timemark) {
      timemark = progress.timemark;
      report = 'Video Progress: ' + timemark + "...";
      console.log(report);
      client.emit('video-progress', {data: report});
    }
  }
  // Report video error
  function onError(err, stdout, stderr) {
    report = 'Error creating video: ' + err.message;
    console.log(report);
    client.emit('video-error', {data: report});
  }
  // Report video completion
  function onEnd() {
    report = 'Finished creating video: ' + folder + videoFile;
    console.log(report);
    client.emit('video-done', {data: report});
  }
  // Handle a disconnection from the client
  function onDisconnect() {
    console.log('Received: disconnect event from client: ' + client.id);
    client.removeListener('frame', onData);
    client.removeListener('audio', onData);
    client.removeListener('done', onClientDone);
    client.removeListener('disconnect', onDisconnect);
  }
});
// test-av-worker.js
//
// MANAGE COMMUNICATIONS BETWEEN THE PAGE AND SERVER
// Connect to the socket server
self.importScripts('https://cdn.socket.io/socket.io-1.4.5.js');
var socket = io.connect('http://localhost:3000');
socket.on('video-progress', onVideoProgress);
socket.on('video-error', onVideoDone);
socket.on('video-done', onVideoDone);
// Sender constants for reporting
const WORKER = 'WORKER';
const SERVER = 'SERVER';
const COMPLETE = 'COMPLETE';
// Service the queue every 30ms (arbitrary)
var timer = setInterval(serviceQueue, 30);
var message, queue = [], done = false, sending = false;
// Handle messages from the web page
onmessage = function (e) {
  var message = e.data;
  switch (message.type) {
    // Strip off the 'data:image/png;base64,' header from images
    case 'frame':
      if (message.data) message.data = message.data.split(',')[1];
      break;
    // Set done flag for queue
    case 'done':
      done = true;
      break;
  }
  // Queue the message to be transmitted
  queue.push(message);
};
// Service the queue
function serviceQueue() {
  // Bail out if we're currently sending
  if (sending) return;
  // If we have messages, send the next one
  if (queue.length > 0) {
    sending = true;
    message = queue.shift();
    socket.emit(message.type, message, function () {
      // Report acknowledgments from server
      if (message.type === 'frame') {
        displayMessage('Received frame ' + message.ordinal, SERVER);
      } else if (message.type === 'audio') {
        displayMessage('Received audio', SERVER);
      } else if (message.type === 'done') {
        displayMessage("Received 'client done'", SERVER);
      }
      sending = false;
    });
    // Report item sent to server
    if (message.type === 'frame') {
      displayMessage('Sent frame ' + message.ordinal, WORKER);
    } else if (message.type === 'audio') {
      displayMessage('Sent audio', WORKER);
    } else {
      displayMessage("Sent 'client done'", WORKER);
    }
  } else if (done && queue.length === 0) {
    // Kill the timer when done is true and the queue is empty
    clearInterval(timer);
  }
}
// Handle server progress reporting
function onVideoProgress(message) {
  displayMessage(message.data, SERVER);
}
// Handle server done reporting
function onVideoDone(message) {
  displayMessage(message.data, SERVER);
  displayMessage("Done", COMPLETE);
  socket.close();
  close();
}
// Display a message by posting it to the page
function displayMessage(data, sender) {
  var message = {data: data, sender: sender};
  self.postMessage(message);
}
@RouillerRomain
Hello, it seems to be a cool file, but I don't see any audio preview or file conversion. I'm not quite sure what I could have done wrong here.

@meetAhmed commented Aug 24, 2019

I have run this, but I received this error:

Error creating video: ffmpeg exited with code 1: C:\Users\ahmedAli\Desktop\test/var/tmp/test-av-server/gjxvgEnyvCY3Vk4PAAAA/: Permission denied

What could be the cause of this error?

@cliffhall (Author)

Sounds like ffmpeg didn’t have permissions to write to disk.
