Skip to content

Instantly share code, notes, and snippets.

@chrislaughlin
Last active August 4, 2020 22:42
Show Gist options
  • Save chrislaughlin/c49ef1e8276aeccc78622ab0ca266002 to your computer and use it in GitHub Desktop.
//checking the time
// Module-level timer state shared by startTimer/endTimer.
let startTime;
let endTime;
let seconds = 0;

// Marks the timer start with a high-resolution timestamp.
function startTimer() {
  startTime = performance.now();
}

// Marks the timer end and returns the elapsed time rounded to whole seconds.
function endTimer() {
  endTime = performance.now();
  // elapsed milliseconds -> seconds, rounded to the nearest whole second
  const elapsedMs = endTime - startTime;
  seconds = Math.round(elapsedMs / 1000);
  return seconds;
}
//timer function to get current timestamp
let pageLoadtime; // "HH:MM:SS" timestamp recorded around page load
let endPageTime; // NOTE(review): declared but never assigned or read in this file — confirm before removing
/**
 * Left-pads a number to two digits for clock display.
 * FIX: always returns a string — the original returned a number for
 * values >= 10, giving an inconsistent return type.
 * @param {number} num - non-negative integer (an hour/minute/second value)
 * @returns {string} two-digit string, e.g. 7 -> "07", 12 -> "12"
 */
function addZero(num) {
  return String(num).padStart(2, '0');
}

/**
 * Current local wall-clock time.
 * @returns {string} formatted as "HH:MM:SS"
 */
function dateTime() {
  const today = new Date();
  const hours = addZero(today.getHours());
  const minutes = addZero(today.getMinutes());
  const secs = addZero(today.getSeconds());
  return `${hours}:${minutes}:${secs}`;
}
// Record the page-load timestamp once the page has loaded.
// FIX: the original `onload = (pageLoadtime = dateTime())` assigned the
// *string* result of dateTime() to onload instead of a handler function,
// so nothing actually ran on the load event.
onload = () => {
  pageLoadtime = dateTime();
};
console.log(dateTime());
let videoStartTime;
//starts video stream
let video = document.querySelector("#videoElement");
// FIX: guard against navigator.mediaDevices being undefined (insecure
// context / older browser) — the original dereferenced it unconditionally
// and would throw a TypeError instead of degrading gracefully.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
  navigator.mediaDevices.getUserMedia({
    video: true
  }).then(function(stream) {
    video.srcObject = stream;
    // Run foreign-object detection every 4 seconds once the stream is live.
    setInterval(function() {
      objects();
    }, 4000);
  })
  .catch(function(err) {
    console.log("Video stream error", err);
  });
}
//function to check and predict if there are foreign objects in stream - phones, books, TODO headphones
//TODO - train model for better detection
//TODO - return image and alert data to database

/**
 * Detects objects in the current video frame and logs the predictions.
 * FIX: the original called cocoSsd.load() on every 4-second tick,
 * re-downloading the model each time; the model is now loaded once and
 * cached on the function itself. Rejections are logged instead of being
 * left as unhandled promise rejections.
 */
async function objects() {
  try {
    // Load the coco-ssd model once and reuse it across interval calls.
    objects.model = objects.model || (await cocoSsd.load());
    // detect objects in the image.
    const predictions = await objects.model.detect(video);
    console.log('Predictions: ', predictions);
  } catch (err) {
    console.log('Object detection error', err);
  }
}
// NOTE(review): querySelector('stream') matches a <stream> TAG, which is not
// a standard HTML element, and videoFeed is never used elsewhere in this
// file — likely dead code; confirm before removing.
const videoFeed = document.querySelector('stream');
// Facemesh model instance, assigned once the video element has data.
let model;
// checking the video has loaded in, loading face model and setting 3 sec interval to check movements
video.onloadeddata = async (event) => {
videoStartTime = dateTime();
console.log('video loaded @' + dateTime());
// Load the MediaPipe facemesh model.
model = await facemesh.load();
console.log(model);
// Poll for head-direction changes every 3 seconds.
setInterval(function() {
main();
}, 3000);
};
//creating the ajax to store the variables in the database
let dir;
let canvas;
let dataURI;

/**
 * POSTs one alert record (nose-tip x/y/z, inferred direction, captured
 * frame) to the backend.
 * @param {number[]} ntip - [x, y, z] coordinates of the detected nose tip.
 */
function storingData(ntip) {
  const formData = {
    'ntxaxis': ntip[0],
    'ntyaxis': ntip[1],
    'ntzaxis': ntip[2],
    'direct': dir,     // module-level direction last set by main()
    'canvas': dataURI  // module-level JPEG data URI of the captured frame
  };
  // process the form
  $.ajax({
    type: 'POST',
    url: 'https://laurencefay.com/examdetector/pro.php',
    data: formData,
    dataType: 'json',
  }).fail(function(err) {
    // FIX: failures were previously silent; log so lost alerts are visible.
    console.log('storingData POST failed', err);
  });
}
//main function uses faceMesh to determine and collect 3D facial points and return data

// Captures the current video frame into a 320x240 JPEG data URI.
// Updates the module-level `canvas` variable, as the original code did.
function captureFrame() {
  //creating the image canvas for recording alert
  canvas = document.createElement('canvas');
  canvas.width = 320;
  canvas.height = 240;
  const ctx = canvas.getContext('2d');
  //draw image to canvas. scale to target dimensions
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  //convert to desired file format
  return canvas.toDataURL('image/jpeg');
}

/**
 * Runs one facemesh pass: estimates facial landmarks, infers head
 * direction from the nose tip (x axis), cheeks (x axis) and mid-eye
 * point (y axis), captures an evidence frame, and posts nose-tip alerts
 * to the server via storingData().
 *
 * FIX: the original "no face" guard was
 *   `typeof model.estimateFaces(videoElem) == 'undefined'`
 * which re-ran the expensive estimation and compared a Promise's typeof
 * ('object'), so the guard never fired and `predictions[0]` threw when
 * no face was detected. The duplicated canvas-capture code is now a
 * single helper, and the unused `keypoints` local is gone.
 */
async function main() {
  const videoElem = document.querySelector('#videoElement');
  const predictions = await model.estimateFaces(videoElem);
  if (!predictions || predictions.length === 0) {
    console.log('no face present');
    return;
  }
  const ntip = predictions[0]['annotations']['noseTip'];
  const lCheek = predictions[0]['annotations']['leftCheek'];
  const rCheek = predictions[0]['annotations']['rightCheek'];
  const midEye = predictions[0]['annotations']['midwayBetweenEyes'];
  for (let i = 0; i < predictions.length; i++) {
    console.log(ntip[0]);
    //nose tip recognition - creates x,y,z axis data points
    //returns alongside captured image to be returned to admin panel.
    if (ntip[0][0] > 420) {
      dir = 'left';
      console.log('Left');
      dataURI = captureFrame();
      storingData(ntip[0]);
    } else if (ntip[0][0] < 300) {
      dir = 'right';
      console.log('Right');
      dataURI = captureFrame();
      storingData(ntip[0]);
    }
    //cheek facial point recognition - creates x,y,z axis data points
    //returns alongside captured image to be returned to admin panel.
    // NOTE: dataURIRC is an implicit global (as in the original) and is
    // not sent anywhere yet.
    if (lCheek[0][0] < 350) {
      dir = 'right';
      console.log('facing right');
      dataURIRC = captureFrame();
      // TODO storingData(rCheek[0]);
    } else if (rCheek[0][0] > 325) {
      dir = 'left';
      console.log('facing left');
      dataURIRC = captureFrame();
      // TODO storingData(lCheek[0]);
    }
    //midEye facial point recognition - creates x,y,z axis data points
    //based on the y axis
    //returns alongside captured image to be returned to admin panel.
    console.log("mideye " + midEye[0]);
    if (midEye[0][1] < 245) {
      dir = 'up';
      console.log('looking up');
      dataURIU = captureFrame();
      // TODO storingData(midEye[0]);
    } else if (midEye[0][1] > 295) {
      dir = 'down';
      console.log("looking down");
      dataURIU = captureFrame();
      // TODO storingData(midEye[0]);
    }
  }
}
// Tab-visibility tracking: counts how often the candidate leaves and
// returns to the exam tab and records the timestamp of each change.
let visibleChangeCount = 0;
let notVisibleChangeCount = 0;
let visibleTimeCount;    // "HH:MM:SS" of the latest became-visible event
let notVisibleTimeCount; // "HH:MM:SS" of the latest became-hidden event
let totalVisTime = 1;
let countTime = 0;
let dateAndTime; // FIX: statement previously relied on ASI (missing semicolon)
let secsOffTab;
//checks if the visibility has changed
document.addEventListener("visibilitychange", function() {
  if (document.visibilityState === 'visible') {
    startTimer();
    visibleTimeCount = dateTime();
    console.log('visible ' + visibleTimeCount);
    visibleChangeCount += 1;
    console.log('visible total count =' + visibleChangeCount);
  } else {
    notVisibleTimeCount = dateTime();
    console.log('not visible ' + notVisibleTimeCount);
    notVisibleChangeCount += 1;
    console.log('not visible total count =' + notVisibleChangeCount);
  }
  // TODO: computing the total time spent off-tab is still unsolved.
  // Earlier (removed) attempts subtracted "HH:MM:SS" strings directly,
  // which cannot work — compare epoch milliseconds (e.g. Date.now()
  // captured at each transition) instead.
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment