Skip to content

Instantly share code, notes, and snippets.

@newbenhd
Created September 18, 2019 23:14
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save newbenhd/7b4b83742b6d947febe989f9d3f9654a to your computer and use it in GitHub Desktop.
const VideoComponent = props => {
useEffect(() => {
const video = document.getElementById('srcVideo');
video.crossOrigin = "anonymous";
const bind = async () => {
const net = await posenet.load({
architecture: input.architecture,
outputStride: input.outputStride,
inputResolution: input.inputResolution,
multiplier: input.multiplier,
quantBytes: input.quantBytes
});
const width = parseInt(video.offsetWidth);
const height = parseInt(video.offsetHeight);
detectPoseInRealTime(video, net, 'srcCanvas', width, height);
};
bind();
}, []);
return <div className={'videoComponent'}>
<video playsInline controls id={'srcVideo'} src={'../' + props.url}></video>
<canvas id={'srcCanvas'} />
</div>
};
/**
 * Continuously draws `video` (mirrored) onto the canvas identified by
 * `canvasId` and overlays the single-person pose estimated by `net`,
 * rescheduling itself via requestAnimationFrame on every frame.
 *
 * @param {HTMLVideoElement} video  - source element fed to PoseNet
 * @param {Object} net              - a loaded posenet model
 * @param {string} [canvasId]       - DOM id of the target canvas
 * @param {number} [width]          - canvas width; defaults to the free
 *                                    variable `videoWidth` (defined elsewhere)
 * @param {number} [height]         - canvas height; defaults to the free
 *                                    variable `videoHeight` (defined elsewhere)
 *
 * NOTE(review): the loop has no stop condition — callers cannot cancel it.
 */
function detectPoseInRealTime(video, net, canvasId = 'output', width = videoWidth, height = videoHeight) {
// TODO(review): debugging leftover — remove once the wiring is verified.
console.log(canvasId, video, net);
const canvas = document.getElementById(canvasId);
const ctx = canvas.getContext('2d');
// Feed the original image and flip only the keypoints' x coordinates;
// flipping the image itself would require permuting left/right keypoint
// pairs. (This comment came from the PoseNet webcam demo — here the source
// is a <video src> file, so mirroring may not actually be wanted; confirm.)
const flipPoseHorizontal = true;
canvas.width = width;
canvas.height = height;
// Estimate and draw the pose on each animation frame.
async function poseDetectionFrame() {
const pose = await net.estimateSinglePose(video, {
flipHorizontal: flipPoseHorizontal,
decodingMethod: 'single-person'
});
// `singlePoseDetection` is a free variable (confidence thresholds,
// presumably a UI/config object defined elsewhere — TODO confirm).
// Assigned exactly once, so `const` instead of the original's
// uninitialized `let` + later assignment.
const minPoseConfidence = +singlePoseDetection.minPoseConfidence;
const minPartConfidence = +singlePoseDetection.minPartConfidence;
ctx.clearRect(0, 0, width, height);
ctx.save();
// Mirror the frame horizontally to match the flipped keypoints.
ctx.scale(-1, 1);
// ctx.translate(0, -height);
ctx.drawImage(video, 0, 0, width, height);
ctx.restore();
// Draw the skeleton and keypoints only when the pose clears the
// configured confidence threshold.
const { score, keypoints } = pose;
// if (canvasId === 'srcCanvas') {
// console.log(score, keypoints);
// }
if (score >= minPoseConfidence) {
drawKeypoints(keypoints, minPartConfidence, ctx);
drawSkeleton(keypoints, minPartConfidence, ctx);
}
requestAnimationFrame(poseDetectionFrame);
}
poseDetectionFrame();
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment