@HemantKumar01
Created February 18, 2021 11:07
Human nobundle error
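The script below mirrors the webcam into a canvas and, on each frame, runs BodyPix person segmentation plus @vladmandic/human face detection (via the human.esm-nobundle build) on the tfjs WASM backend.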
import * as tf from "@tensorflow/tfjs";
import * as wasm from "@tensorflow/tfjs-backend-wasm";
import * as bodyPix from "@tensorflow-models/body-pix";
import Human from "../dist/face-detection/human.esm-nobundle.js";
const inputCanvas = document.getElementById("canvasVideo");
const video = document.getElementById("vid");
const outputCanvas = document.getElementById("canvas");
var net = 0; // BodyPix model, 0 until loaded
const inpCtx = inputCanvas.getContext("2d");
var interval;
var ratio; // video aspect ratio (height / width)
var human; // @vladmandic/human instance
// configuration for face detection
var configForHuman = {
  backend: "wasm",
  wasmPath: "../node_modules/@tensorflow/tfjs-backend-wasm/dist/",
  gesture: { enabled: false },
  face: {
    mesh: { enabled: false },
    iris: { enabled: false },
    age: { enabled: false },
    gender: { enabled: false },
    emotion: { enabled: false },
    embedding: { enabled: true },
  },
  body: { enabled: false },
  hand: { enabled: false },
};
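// point the WASM backend at its bundled binaries, switch tfjs to it,
// then start the webcam stream once the backend is active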
wasm.setWasmPaths("../node_modules/@tensorflow/tfjs-backend-wasm/dist/");
tf.setBackend("wasm").then(() => {
  navigator.mediaDevices
    .getUserMedia({ video: true, audio: true })
    .then((stream) => {
      video.srcObject = stream;
    });
});
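// segment the person in the input canvas with BodyPix and draw the resulting
// mask, composited over the frame, onto the output canvas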
async function predictBodySegmentation() {
  var time1 = new Date();
  const segmentation = await net.segmentPerson(inputCanvas);
  var time2 = new Date();
  console.log("segmentation took: " + (time2 - time1) + " ms");
  console.log(segmentation);
  outputCanvas.height = inputCanvas.height;
  outputCanvas.width = inputCanvas.width;
  // semi-transparent green foreground mask over a fully transparent background
  const coloredPartImage = bodyPix.toMask(
    segmentation,
    { r: 0, g: 255, b: 0, a: 100 },
    { r: 0, g: 0, b: 0, a: 0 },
    true
  );
  const opacity = 0.7;
  const flipHorizontal = false;
  const maskBlurAmount = 0;
  bodyPix.drawMask(
    outputCanvas,
    inputCanvas,
    coloredPartImage,
    opacity,
    maskBlurAmount,
    flipHorizontal
  );
}
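// detect faces in the raw <video> element with @vladmandic/human, timing each call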
async function predictFaceDetection() {
  var time1 = new Date();
  var face = await human.detect(video, configForHuman); // run face detection with the config above
  var time2 = new Date();
  console.log(`face detection took: ${time2 - time1}ms`);
  console.log(face);
}
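// per-frame step: copy the current video frame onto the input canvas, then
// kick off segmentation (on the canvas) and face detection (on the video)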
function getImage(time = 0) {
  inputCanvas.width = (49 / 100) * window.innerWidth;
  inputCanvas.height = ratio * (49 / 100) * window.innerWidth;
  inpCtx.drawImage(video, 0, 0, inputCanvas.width, inputCanvas.height); // draw the current frame via the 2d context defined above
  predictBodySegmentation();
  predictFaceDetection();
}
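// once the video can play: load both models (first time only), hide the loader,
// then call getImage every 100 ms; pausing the video stops the loop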
video.oncanplay = async () => {
  ratio = video.videoHeight / video.videoWidth;
  if (net === 0) {
    var time1 = new Date();
    human = new Human(); // loading the @vladmandic/human library for face detection
    net = await bodyPix.load(); // loads the @tensorflow-models/body-pix model for person segmentation
    var time2 = new Date();
    console.log("BodyPix and Human libraries loaded in: " + (time2 - time1) + " ms");
    document.querySelector(".loader").style.display = "none";
  }
  interval = setInterval(getImage, 100); // for slow systems, an interval is used instead of requestAnimationFrame
  video.onpause = () => {
    clearInterval(interval);
  };
  video.play();
};