Code for "Face Tracking With Javascript On Any Device With A Browser (Mobile or Desktop)" https://kongmunist.medium.com/face-tracking-with-javascript-on-any-device-with-a-browser-mobile-or-desktop-48aa561fd9d5
<!DOCTYPE html>
<html lang="en">
<head>
    <title>I see you! | Face Mesh</title>
    <!-- Javascript imports -->
    <script src="code.js"></script>
    <script>
        // Run main() only after the page has loaded, so the <video> and
        // <canvas> elements exist when setupCamera() looks for them
        window.addEventListener('load', main);
    </script>
</head>
<body style="background-color:aliceblue;text-align:center;">
    <video autoplay muted playsinline hidden id="video" style="width: auto; height: auto;"></video>
    <canvas id="facecanvas"></canvas>
</body>
</html>
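<!-- Note: the base demo above only needs code.js, but the Face Mesh and PPG
     demos further down also call facemesh.load, tf.*, and window.kiss.FFTR,
     so their pages need extra header imports. A sketch of those imports;
     the CDN URLs and the fft.js filename are assumptions, not part of the
     original gist. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
<script src="fft.js"></script> <!-- a kissFFT build that exposes window.kiss -->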
// Face Mesh Demo by Andy Kong
// Base Javascript for setting up a camera-streaming HTML webpage.
// The <video> element is shared by every function below
var video;
async function setupCamera() {
    // Find the <video> element in the webpage,
    // then use the mediaDevices API to request a camera from the user
    video = document.getElementById('video');
    const stream = await navigator.mediaDevices.getUserMedia({
        'audio': false,
        'video': {
            facingMode: 'user',
            width: {ideal: 1920},
            height: {ideal: 1080},
        },
    });
    // Assign our camera stream to the HTML's video element
    video.srcObject = stream;
    // Resolve once the video's metadata (dimensions) has loaded
    return new Promise((resolve) => {
        video.onloadedmetadata = () => {
            resolve(video);
        };
    });
}
async function drawVideo() {
    // Draw the current video frame onto our canvas
    ctx.drawImage(video, 0, 0);
    // Schedule ourselves to run again on the next frame
    requestAnimationFrame(drawVideo);
}
// Set up variables to draw on the canvas
var canvas;
var ctx;
async function main() {
    // Set up the front-facing camera
    await setupCamera();
    video.play();
    // Set up the HTML Canvas to draw the video feed onto
    canvas = document.getElementById('facecanvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx = canvas.getContext('2d');
    // Start the video->canvas drawing loop
    drawVideo();
}
// Big Eye Big Mouth Demo by Andy Kong
// Function for drawing larger eyes onto a face detected by Face Mesh
function drawEyesBig(face) {
    let mesh = face.scaledMesh;
    // Left eye bounds (top, left, bottom, right) are the points (27, 130, 23, 243)
    let lTop = mesh[27][1];
    let lLeft = mesh[130][0];
    let lBot = mesh[23][1];
    let lRig = mesh[243][0];
    let lWid = lRig - lLeft;
    let lHei = lBot - lTop;
    // Right eye bounds (top, left, bottom, right) are the points (257, 463, 253, 359)
    let rTop = mesh[257][1];
    let rLeft = mesh[463][0];
    let rBot = mesh[253][1];
    let rRig = mesh[359][0];
    let rWid = rRig - rLeft;
    let rHei = rBot - rTop;
    // Draw each eye from the video onto the canvas at twice its size.
    // Offsetting the destination by half the eye's width/height keeps
    // the doubled eye centered on the original one.
    ctx.drawImage(video, rLeft, rTop, rWid, rHei,
                  rLeft - rWid * .5, rTop - rHei * .5, 2 * rWid, 2 * rHei);
    ctx.drawImage(video, lLeft, lTop, lWid, lHei,
                  lLeft - lWid * .5, lTop - lHei * .5, 2 * lWid, 2 * lHei);
}
// Big Eye Big Mouth Demo by Andy Kong
// Function for drawing larger lips onto a face detected by Face Mesh
function drawLipsBig(face) {
    // Get the lips' bounds from the face annotations' grouping of points.
    // We take the X coordinates of the upper lip (assumed equal to the lower lip's)
    let Xs = face.annotations.lipsUpperOuter.map(elem => elem[0]);
    // And the Y coordinates of the upper and lower lip
    let TopYs = face.annotations.lipsUpperOuter.map(elem => elem[1]);
    let BotYs = face.annotations.lipsLowerOuter.map(elem => elem[1]);
    // The max and min of the Xs and Ys give the lips' bounding box
    let lipRight = Math.max(...Xs);
    let lipLeft = Math.min(...Xs);
    let lipTop = Math.min(...TopYs);
    let lipBot = Math.max(...BotYs);
    let lipWid = lipRight - lipLeft;
    let lipHei = lipBot - lipTop;
    // Get the pixel data from our canvas as ImageData, then convert it into a
    // TensorFlow Tensor so we can flip it vertically. The padding on the width
    // and height is needed because the bounding box is a little tight.
    let lips = ctx.getImageData(lipLeft - lipWid * .05, lipTop - lipHei * .05,
                                lipWid + lipWid * .1, lipHei + lipHei * .1);
    // tf.tidy disposes every intermediate Tensor created inside it for us
    let lips2x = tf.tidy(() => {
        // Flip the lips upside down (reverse along axis 0, the rows)
        const lipsUpsideDown = tf.browser.fromPixels(lips, 4).reverse(0);
        // Double the lip size, then cast back to int32 for ImageData
        return tf.image.resizeBilinear(lipsUpsideDown,
            [lipsUpsideDown.shape[0] * 2, lipsUpsideDown.shape[1] * 2]).asType("int32");
    });
    // Create a new, empty ImageData to dump our Tensor back onto the canvas
    let tmpIm = new ImageData(lips2x.shape[1], lips2x.shape[0]);
    tmpIm.data.set(lips2x.dataSync());
    ctx.putImageData(tmpIm, Math.round(lipLeft - tmpIm.width * .25),
                     Math.round(lipTop - tmpIm.height * .25));
    // Clean up the memory of the Tensor returned from tf.tidy
    lips2x.dispose();
}
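// Neither drawEyesBig nor drawLipsBig is invoked in this excerpt; a minimal
// sketch of wiring them into the render loop, assuming you swap them in for
// the drawFace call in the drawVideo loop below (the name
// drawVideoBigFeatures is hypothetical):
async function drawVideoBigFeatures() {
    ctx.drawImage(video, 0, 0);
    for (const face of curFaces) {
        drawEyesBig(face);
        drawLipsBig(face);
    }
    requestAnimationFrame(drawVideoBigFeatures);
}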
async function drawVideo() {
    // Draw the current video frame, then draw each detected face on top
    ctx.drawImage(video, 0, 0);
    for (const face of curFaces) {
        drawFace(face);
    }
    requestAnimationFrame(drawVideo);
}
// Draws the current face's mesh points onto the canvas as small dots
async function drawFace(face) {
    ctx.fillStyle = 'cyan';
    for (const pt of face.scaledMesh) {
        ctx.beginPath();
        ctx.ellipse(pt[0], pt[1], 3, 3, 0, 0, 2 * Math.PI);
        ctx.fill();
    }
}
var curFaces = [];
async function renderPrediction() {
    // Run Face Mesh on our video stream
    const facepred = await fmesh.estimateFaces(video);
    // If we find a face, export it to a global variable so we can access it elsewhere
    if (facepred.length > 0) {
        curFaces = facepred;
    }
    // Schedule ourselves to run again
    requestAnimationFrame(renderPrediction);
}
var canvas;
var ctx;
var fmesh;
async function main() {
    // Load the Face Mesh model: up to 3 faces, 90% detection confidence
    fmesh = await facemesh.load({detectionConfidence: 0.9, maxFaces: 3});
    // Set up the front-facing camera
    await setupCamera();
    video.play();
    // HTML Canvas for the video feed
    canvas = document.getElementById('facecanvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx = canvas.getContext('2d');
    // Start the drawing and prediction loops
    drawVideo();
    renderPrediction();
}
// PPG Demo by Andy Kong
// History tracking for the intensity of the face box
// Set up variables for tracking the average intensity
// The desktop's camera goes at around 10Hz, and we want ~5 seconds of history
var maxHistLen = 64;
var bloodHist = Array(maxHistLen).fill(0);
var timingHist = Array(maxHistLen).fill(0);
var last = performance.now();
// A one-liner to help us average the history arrays
var average = (array) => array.reduce((a, b) => a + b) / array.length;
// Draws the tracked face region's debug box onto the canvas and updates the
// intensity and timing histories
async function drawFaces() {
    ctx.strokeStyle = "cyan";
    ctx.lineWidth = 2;
    for (const face of curFaces) {
        if (face.faceInViewConfidence > .90) {
            let mesh = face.scaledMesh;
            // Get the facial region of interest's bounds
            let boxLeft = mesh[117][0];
            let boxTop = mesh[117][1];
            let boxWidth = mesh[346][0] - boxLeft;
            let boxHeight = mesh[164][1] - boxTop;
            // Draw the box a bit larger for debugging purposes
            ctx.beginPath();
            const boxsize = 4;
            ctx.rect(boxLeft - boxsize, boxTop - boxsize,
                     boxWidth + boxsize * 2, boxHeight + boxsize * 2);
            ctx.stroke();
            // Get the image data from that region
            let bloodRegion = ctx.getImageData(boxLeft, boxTop, boxWidth, boxHeight);
            // Sum the RGBA bytes, subtract the (always-255) alpha channel,
            // then average over the three color channels
            const numPixels = bloodRegion.width * bloodRegion.height;
            let videoDataSum = bloodRegion.data.reduce((a, b) => a + b, 0);
            videoDataSum -= numPixels * 255;
            let avgIntensity = videoDataSum / (numPixels * 3);
            // Track the FPS of this loop as well
            timingHist.push(1 / ((performance.now() - last) * .001));
            last = performance.now();
            // Append the smoothed intensity (an exponential moving average),
            // then shift out the oldest elements once the arrays are full
            bloodHist.push(bloodHist[maxHistLen - 1] * .8 + .2 * avgIntensity);
            if (bloodHist.length > maxHistLen) {
                bloodHist.shift();
                timingHist.shift();
                let fftData = await calcFFT(bloodHist);
                updateChart(timingHist, fftData);
                updateChart2(bloodHist);
            }
        }
    }
}
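// updateChart and updateChart2 are called above but not defined in this
// excerpt (they presumably live in the gist's charting code). Hypothetical
// stand-ins that log instead of plotting, so the loop runs on its own:
function updateChart(timingHist, fftData) {
    // Rough FPS estimate plus the index of the largest FFT bin
    console.log("FPS ~" + average(timingHist).toFixed(1),
                "| peak FFT bin:", fftData.indexOf(Math.max(...fftData)));
}
function updateChart2(bloodHist) {
    console.log("latest intensity:", bloodHist[bloodHist.length - 1].toFixed(2));
}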
// PPG Demo by Andy Kong
// FFT function in Javascript
// - Make sure to include fft.js in a <script> tag in the HTML header.
// - We initialize the FFT object in Javascript with this command in main():
//     fft = new window.kiss.FFTR(maxHistLen);
async function calcFFT(data) {
    // Remove the mean so the DC component doesn't dominate the spectrum
    const avg = average(data);
    data = data.map(elem => elem - avg);
    // Calculate the FFT
    let tmp = fft.forward(data);
    // Drop the DC term (should be ~0 after mean removal) and return the rest
    return tmp.slice(1);
}
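// For completeness, a sketch of the PPG demo's setup tying the pieces above
// together; it assumes fft.js is loaded in the HTML header as noted, and
// that this demo's drawVideo loop calls drawFaces() each frame:
var fft;
async function main() {
    fft = new window.kiss.FFTR(maxHistLen); // real-input FFT over the history window
    fmesh = await facemesh.load({detectionConfidence: 0.9, maxFaces: 3});
    await setupCamera();
    video.play();
    canvas = document.getElementById('facecanvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx = canvas.getContext('2d');
    drawVideo();
    renderPrediction();
}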