@will-wow · Last active November 24, 2020
export const App: FunctionComponent = () => {
  const [videoRef, status] = useWebcam();
  // Hooks...
  return (
    <PredictionWrapper status="none">
      {/* ... */}
      <VideoContainer>
        <Video
          autoPlay
          muted
          playsInline
          hide={!detections}
          ref={videoRef}
          onLoadedData={onVideoReady}
        />
        {videoRef.current && detections && (
          <SvgContainer
            viewBox={`0 0 ${videoRef.current.videoWidth} ${videoRef.current.videoHeight}`}
          >
            {/* Draw a box for each detection. */}
            {detections.map((detection) => {
              const { box, label } = detection;
              const { left, top, width, height } = box;
              return (
                <rect
                  key={`${left}-${top}-${label}`}
                  x={left}
                  y={top}
                  width={width}
                  height={height}
                  stroke={detectionColor[label]}
                  fill="transparent"
                  strokeWidth="5"
                />
              );
            })}
          </SvgContainer>
        )}
      </VideoContainer>
      {/* ... */}
    </PredictionWrapper>
  );
};
export const detectionColor: Record<string, string> = {
  loading: "black",
  face: "red",
  mask: "green",
  both: "orange",
  none: "black",
};
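For reference, each detection rendered above follows (roughly) the PredictedObject shape that @tensorflow/tfjs-automl returns; this sketch is for orientation, not part of the original gist:

// Approximate shape of one detection from @tensorflow/tfjs-automl.
// Coordinates are in pixels of the source video frame.
interface Box {
  left: number;
  top: number;
  width: number;
  height: number;
}
interface PredictedObject {
  box: Box;
  label: string; // e.g. "face" or "mask" for this model
  score: number; // confidence in [0, 1]
}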
import { useDetection } from "./lib/useDetection";
export const App: FunctionComponent = () => {
  // ...
  // Use the new hook
  const [detections, onVideoReady] = useDetection(detectionModel, videoRef);
  return (
    <PredictionWrapper status="none">
      {/* ... */}
      <Video
        autoPlay
        muted
        playsInline
        ref={videoRef}
        // Record when the video is ready
        onLoadedData={onVideoReady}
      />
      {/* ... */}
    </PredictionWrapper>
  );
};
import React from "react";
import { useDetectionModel } from "./lib/useDetectionModel";

const modelUrl = `https://storage.googleapis.com/path/to/model.json`;

export const App: React.FunctionComponent = () => {
  const detectionModel = useDetectionModel(modelUrl);
  return <PredictionWrapper>{/* ... */}</PredictionWrapper>;
};
export const App: FunctionComponent = () => {
  const detectionModel = useDetectionModel(modelUrl);
  const [videoRef, status] = useWebcam();
  const [detections, onVideoReady] = useDetection(detectionModel, videoRef);
  // Calculate status
  const detectionStatus = getDetectionStatus(detections);
  return (
    // Pass the status to the background.
    <PredictionWrapper status={detectionStatus}>
      <GlobalStyle />
      <Message>Doctor Masky</Message>
      <VideoContainer>
        <Video
          autoPlay
          muted
          playsInline
          hide={!detections}
          ref={videoRef}
          onLoadedData={onVideoReady}
        />
      </VideoContainer>
      {/* Print the status message */}
      <Message>{getMessage(detectionStatus, status)}</Message>
    </PredictionWrapper>
  );
};
export const detectionColor: Record<string, string> = {
  loading: "black",
  face: "red",
  mask: "green",
  both: "orange",
  none: "black",
};

const PredictionWrapper = styled.div<{ status: string }>`
  /* Use the status. */
  background: ${({ status }) => detectionColor[status]};
`;
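A small hardening worth considering: since the color map is keyed by the status strings, the styled-component prop can be narrowed from string to the DetectionStatus union (defined below), so an invalid status fails to compile. A minimal sketch; the transition line is an assumption, not in the original:

// Hypothetical tightening: key the prop by DetectionStatus instead of string.
const PredictionWrapper = styled.div<{ status: DetectionStatus }>`
  background: ${({ status }) => detectionColor[status]};
  /* Assumed nicety: fade between colors as detections change. */
  transition: background 0.3s ease;
`;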
export const App: FunctionComponent = () => {
  const detectionModel = useDetectionModel(modelUrl);
  const [videoRef, status] = useWebcam();
  const [detections, onVideoReady] = useDetection(detectionModel, videoRef);
  const detectionStatus = getDetectionStatus(detections);
  return (
    <PredictionWrapper status={detectionStatus}>
      {/* ... */}
      <VideoContainer>
        <Video
          autoPlay
          muted
          playsInline
          hide={!detections}
          ref={videoRef}
          onLoadedData={onVideoReady}
        />
        {videoRef.current && (
          // Use the video ref to set the width and height.
          <SvgContainer
            viewBox={`0 0 ${videoRef.current.videoWidth} ${videoRef.current.videoHeight}`}
          ></SvgContainer>
        )}
      </VideoContainer>
      {/* ... */}
    </PredictionWrapper>
  );
};
const SvgContainer = styled.svg`
  position: absolute;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  /* Flip video */
  transform: scaleX(-1);
`;
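The viewBox above uses the video's intrinsic videoWidth/videoHeight, so the model's pixel-space boxes map directly onto the overlay, and scaleX(-1) mirrors the overlay to match a selfie view. Presumably the Video element carries the same flip; a minimal sketch of the assumed companion styles (not shown in the original):

// Assumed companion styles: mirror the video the same way as the overlay
// so the boxes stay aligned on screen.
const Video = styled.video<{ hide: boolean }>`
  width: 100%;
  transform: scaleX(-1);
  display: ${({ hide }) => (hide ? "none" : "block")};
`;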
import { useWebcam } from "./lib/useWebcam";
export const App: FunctionComponent = () => {
  // Use the new hook
  const [videoRef, status] = useWebcam();
  return (
    <PredictionWrapper status="none">
      <GlobalStyle />
      <Message>Doctor Masky</Message>
      <VideoContainer>
        <Video
          autoPlay
          muted
          playsInline
          // Pass the ref to the video.
          ref={videoRef}
        />
      </VideoContainer>
      {/* Show the video status. */}
      <Message>{status}</Message>
    </PredictionWrapper>
  );
};
export type DetectionStatus = "loading" | "none" | "face" | "mask" | "both";

export const getDetectionStatus = (
  detections: PredictedObject[] | null
): DetectionStatus => {
  if (!detections) {
    return "loading";
  }
  // Count masked and unmasked faces.
  let masks = 0;
  let faces = 0;
  detections.forEach((detection) => {
    if (detection.label === "mask") {
      masks += 1;
    } else if (detection.label === "face") {
      faces += 1;
    }
  });
  return statusFromCounts(masks, faces);
};

const statusFromCounts = (masks: number, faces: number): DetectionStatus => {
  if (masks > 0 && faces > 0) {
    return "both";
  }
  if (masks > 0) {
    return "mask";
  }
  if (faces > 0) {
    return "face";
  }
  return "none";
};
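To make the counting logic concrete, here are a few illustrative calls (the box and score values are dummies, not real model output):

// Illustrative only: dummy detections exercising each branch.
const mask = { label: "mask", score: 0.9, box: { left: 0, top: 0, width: 1, height: 1 } };
const face = { ...mask, label: "face" };
getDetectionStatus(null);         // "loading" — model not ready yet
getDetectionStatus([]);           // "none"
getDetectionStatus([mask]);       // "mask"
getDetectionStatus([mask, face]); // "both"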
export const webcamStatusMessage: Record<WebcamStatus, string> = {
  waiting: "Waiting for camera...⌛",
  connected: "Loading model...⌛",
  failed: "Couldn't connect to camera. 😞",
};

export const detectionMessage: Record<DetectionStatus, string> = {
  loading: "Loading model...🤔",
  none: "I'm not sure. Try getting closer to the screen. 🤔",
  both: "Ask your friends to put on a mask! 😒",
  face: "Don't forget your mask! 😱",
  mask: "Thanks for wearing a mask! 👏",
};

export const getMessage = (
  detectionStatus: DetectionStatus,
  webcamStatus: WebcamStatus,
  started: boolean = true
): string => {
  if (detectionStatus === "loading") {
    return webcamStatusMessage[webcamStatus];
  } else if (!started) {
    return "✨ Ready to start ✨";
  } else {
    return detectionMessage[detectionStatus];
  }
};
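The precedence is: webcam status while the model loads, then the start gate, then the detection message. For example:

// Illustrative calls:
getMessage("loading", "waiting");       // "Waiting for camera...⌛"
getMessage("mask", "connected");        // "Thanks for wearing a mask! 👏"
getMessage("none", "connected", false); // "✨ Ready to start ✨"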
export type PredictedObjects = automl.PredictedObject[] | null;

export const useDetection = (
  model: automl.ObjectDetectionModel | null,
  videoRef: React.MutableRefObject<HTMLVideoElement | null>
): [PredictedObjects, () => void] => {
  const [detections, setDetections] = useState<PredictedObjects>(null);
  const [videoReady, setVideoReady] = useState(false);
  const onVideoReady = useCallback(() => setVideoReady(true), [setVideoReady]);
  return [detections, onVideoReady];
};
export const DETECTION_INTERVAL = 500;
export const DETECTION_THRESHOLD = 0.65;

export const useDetection = (
  model: automl.ObjectDetectionModel | null,
  videoRef: React.MutableRefObject<HTMLVideoElement | null>
): [PredictedObjects, () => void] => {
  const [detections, setDetections] = useState<PredictedObjects>(null);
  const [videoReady, setVideoReady] = useState(false);
  const onVideoReady = useCallback(() => setVideoReady(true), [setVideoReady]);

  useEffect(() => {
    const video = videoRef.current;
    // Only run detections after the video and model are initialized.
    if (!videoReady || !model || !video) return;
    const detect = (video: HTMLVideoElement) =>
      // Run the model, and ignore low-probability detections.
      model.detect(video, { score: DETECTION_THRESHOLD });
    // First run
    detect(video).then(setDetections);
    // Schedule detections.
    const handle = setInterval(
      () => detect(video).then(setDetections),
      DETECTION_INTERVAL
    );
    // Clean up the interval if the useEffect is re-run.
    return () => {
      clearInterval(handle);
    };
  }, [videoReady, model, videoRef, setDetections]);

  return [detections, onVideoReady];
};
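One caveat with a fixed setInterval: model.detect is async, so if an inference ever takes longer than DETECTION_INTERVAL, calls can overlap and results may arrive out of order. A hypothetical alternative (not in the original) chains each detection off the previous one:

// Hypothetical effect body: run detections back to back instead of on a clock.
useEffect(() => {
  const video = videoRef.current;
  if (!videoReady || !model || !video) return;
  let cancelled = false;
  const loop = async () => {
    while (!cancelled) {
      setDetections(await model.detect(video, { score: DETECTION_THRESHOLD }));
      // Wait between runs; slow inferences simply stretch the cycle.
      await new Promise((resolve) => setTimeout(resolve, DETECTION_INTERVAL));
    }
  };
  loop();
  return () => {
    cancelled = true;
  };
}, [videoReady, model, videoRef]);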
export const DETECTION_INTERVAL = 500;
export const DETECTION_THRESHOLD = 0.65;

export const useDetection = (
  model: automl.ObjectDetectionModel | null,
  videoRef: React.MutableRefObject<HTMLVideoElement | null>
): [PredictedObjects, () => void] => {
  const [detections, setDetections] = useState<PredictedObjects>(null);
  const [videoReady, setVideoReady] = useState(false);
  const onVideoReady = useCallback(() => setVideoReady(true), [setVideoReady]);

  // Warm up model while the camera is connecting.
  useEffect(() => {
    if (model) {
      warmUp(model);
    }
  }, [model]);

  useEffect(() => {
    const video = videoRef.current;
    if (!videoReady || !model || !video) return;
    const detect = (video: HTMLVideoElement) =>
      model.detect(video, { score: DETECTION_THRESHOLD });
    detect(video).then(setDetections);
    const handle = setInterval(
      () => detect(video).then(setDetections),
      DETECTION_INTERVAL
    );
    return () => {
      clearInterval(handle);
    };
  }, [videoReady, model, videoRef, setDetections]);

  return [detections, onVideoReady];
};
import { useState, useEffect } from "react";
import * as automl from "@tensorflow/tfjs-automl";

export const useDetectionModel = (
  modelUrl: string
): automl.ObjectDetectionModel | null => {
  const [model, setModel] = useState<automl.ObjectDetectionModel | null>(null);
  // Load the model once per URL.
  useEffect(() => {
    automl.loadObjectDetection(modelUrl).then(setModel);
  }, [modelUrl]);
  return model;
};
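Note that loadObjectDetection returns a promise that can reject (bad URL, network failure), in which case this hook stays null forever. A minimal hedged extension would at least log the failure:

// Hypothetical variant of the effect with basic error reporting.
useEffect(() => {
  automl
    .loadObjectDetection(modelUrl)
    .then(setModel)
    .catch((error) => console.error("Failed to load model:", error));
}, [modelUrl]);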
import { useRef, useEffect, useState } from "react";

export type WebcamStatus = "waiting" | "connected" | "failed";

const defaultVideoConstraints = { facingMode: "user" };

export const useWebcam = (
  videoConstraints: MediaTrackConstraints = defaultVideoConstraints
): [React.MutableRefObject<HTMLVideoElement | null>, WebcamStatus] => {
  const videoRef = useRef<HTMLVideoElement | null>(null);
  const [status, setStatus] = useState<WebcamStatus>("waiting");

  useEffect(() => {
    // Get video stream
    navigator.mediaDevices
      .getUserMedia({ video: videoConstraints })
      .then((stream) => {
        if (videoRef.current) {
          videoRef.current.srcObject = stream;
          setStatus("connected");
        } else {
          console.error("Webcam connected before video was ready.");
          setStatus("failed");
        }
      })
      .catch((error) => {
        console.error(error);
        setStatus("failed");
      });
  }, [videoConstraints]);

  return [videoRef, status];
};
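As written, the effect never stops the camera stream on unmount, so the camera light stays on after the component goes away. A hypothetical cleanup (not in the original) would stop the tracks:

// Hypothetical effect cleanup: release the camera when the component unmounts.
return () => {
  const stream = videoRef.current?.srcObject as MediaStream | null;
  stream?.getTracks().forEach((track) => track.stop());
};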
import * as tf from "@tensorflow/tfjs";

// Run one inference on a tiny dummy image so the first real frame is fast.
const warmUp = (model: automl.ObjectDetectionModel) => {
  const dummyImage = tf.zeros<tf.Rank.R3>([3, 3, 3]);
  model.detect(dummyImage);
};
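The dummy tensor allocated by tf.zeros is never released above, which leaks a small buffer on the tfjs backend. A sketch that disposes it once the warm-up inference resolves (Tensor.dispose is the standard tfjs call):

// Variant that frees the dummy tensor after the warm-up pass finishes.
const warmUp = async (model: automl.ObjectDetectionModel) => {
  const dummyImage = tf.zeros<tf.Rank.R3>([3, 3, 3]);
  try {
    await model.detect(dummyImage);
  } finally {
    dummyImage.dispose();
  }
};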