Skip to content

Instantly share code, notes, and snippets.

@guybrush
Last active June 28, 2018 06:05
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save guybrush/b1bdaa7589d14b9d6474d34921f6d56e to your computer and use it in GitHub Desktop.
face-detection cam-example
<!DOCTYPE html>
<!-- FIX: the opening <html> tag was missing (the closing </html> exists),
     and <style>/<base> lived inside <body>; per the HTML spec <base> must
     appear in <head>, before any element that resolves a relative URL. -->
<html lang="en">
<head>
<meta charset="utf-8">
<title>face-landmark-cam-example</title>
<style>
#container {position:relative;}
#overlay {position:absolute; top:0; left:0;}
</style>
<!-- <base href="https://rawgit.com/justadudewhohacks/face-api.js/master/"> -->
<base href="https://rawgit.com/guybrush/face-api.js/sourcemaps-built2/">
</head>
<body>
<h1>face-cam-detection</h1>
<div id="container">
<!-- muted lets autoplay proceed under browser autoplay policies;
     the stream is video-only anyway (audio:false) -->
<video id="video" autoplay playsinline muted></video>
<canvas id="overlay"></canvas>
</div>
<button id="play">play</button>
<div id="stats"></div>
<!-- relative src resolves against the <base> href above -->
<script src="dist/face-api.js"></script>
<script>
// --- App state and DOM references ---
let minConfidence = 0.7   // minimum detection score for a face to be reported
var input                 // reserved for a cached net input (currently unused)
var loaded = false        // true once the detection model has been fetched
var paused = true         // toggled by the play/pause button
var elButton = document.getElementById('play')
var elStats = document.getElementById('stats')
var elVideo = document.getElementById('video')
var elOverlay = document.getElementById('overlay')
// FIX: the key was misspelled "audia", so the "no audio" intent was an
// ignored unknown constraint; spell it "audio" so the constraint is real.
var videoConstraints = {audio:false,video:{facingMode:'environment'}}
// Attach the rear-facing camera stream to the <video> element.
navigator.mediaDevices.getUserMedia(videoConstraints)
.then((stream)=>{
elVideo.srcObject = stream
})
.catch((err)=>{
console.log('error',err)
})
load()
elButton.addEventListener('click',onClick)
// Play/pause button handler: pause an active loop, or start the loop
// when the model is loaded and the video element is actually playing.
function onClick(){
var running = !paused
if (running) {
// currently detecting -> stop the loop; play() checks `paused` each frame
paused = true
elButton.innerHTML = 'play'
return
}
var readyToStart = loaded && !elVideo.paused && !elVideo.ended && !!elVideo.srcObject
if (!readyToStart)
return
paused = false
elButton.innerHTML = 'pause'
play()
}
// Fetch the face-detection model, size the overlay canvas to the video's
// intrinsic dimensions, and enable the play button.
async function load() {
elButton.innerHTML = 'loading models ..'
await faceapi.loadFaceDetectionModel('weights/')
console.log('did load detection-model')
// await faceapi.loadFaceLandmarkModel('weights/')
// console.log('did load landmark-model')
// FIX: videoWidth/videoHeight stay 0 until the camera stream's metadata
// arrives; if the model download wins that race the overlay was sized
// 0x0 and nothing could ever be drawn. Wait for loadedmetadata first.
if (!elVideo.videoWidth) {
await new Promise((resolve)=>{
elVideo.addEventListener('loadedmetadata', resolve, {once:true})
})
}
elOverlay.width = elVideo.videoWidth
elOverlay.height = elVideo.videoHeight
loaded = true
elButton.innerHTML = 'play'
}
// Detection loop: snapshot the current video frame, locate faces, draw the
// boxes on the overlay canvas, report timings, then schedule the next frame.
// Exits as soon as `paused` is set or the video becomes unusable.
async function play() {
if (paused || !loaded || elVideo.paused || elVideo.ended || !elVideo.srcObject)
return
var input = await faceapi.toNetInput(elVideo)
var {videoWidth,videoHeight} = elVideo
var t0 = Date.now()
var faces = await faceapi.locateFaces(input, minConfidence)
var t1 = Date.now()
// var landmarks = await faceapi.detectLandmarks(elVideo)
var t2 = Date.now()  // landmark step is disabled, so t2-t1 is ~0ms
var context = elOverlay.getContext('2d')
context.clearRect(0, 0, elOverlay.width, elOverlay.height)
// rescale detections from net-input coordinates to the video's pixel size
faceapi.drawDetection(elOverlay,faces.map(det=>det.forSize(videoWidth,videoHeight)))
// FIX: was `stats.innerHTML` — that only worked through the implicit
// window global created from the element id; use the declared elStats.
elStats.innerHTML=`<pre>
detect faces: ${t1-t0}ms
detect landmarks: ${t2-t1}ms</pre>`
requestAnimationFrame(play)
}
</script>
</body>
</html>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment