Skip to content

Instantly share code, notes, and snippets.

View bourdakos1's full-sized avatar

Nick Bourdakos bourdakos1

View GitHub Profile
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
// Grab the <video> element that will display the live camera feed.
const video = document.getElementById("video")

// Ask the browser for a front-facing ("user") camera stream at 600x500,
// with no audio, and attach the resulting MediaStream to the video element.
// NOTE(review): the original snippet was truncated mid-call — the promise
// chain below restores the conventional completion of this pattern.
navigator.mediaDevices
  .getUserMedia({
    audio: false,
    video: {
      facingMode: "user",
      width: 600,
      height: 500
    }
  })
  .then(stream => {
    video.srcObject = stream
  })
  .catch(error => console.error(error))
[{"id":"5b87f2d0.425eac","type":"tab","label":"Flow 1","disabled":false,"info":""},{"id":"a633f27d.72cc3","type":"http in","z":"5b87f2d0.425eac","name":"","url":"/dashboard","method":"get","upload":false,"swaggerDoc":"","x":140,"y":260,"wires":[["83927661.17c478"]]},{"id":"156284e1.bdaf6b","type":"template","z":"5b87f2d0.425eac","name":"","field":"payload","fieldType":"msg","format":"handlebars","syntax":"mustache","template":"<!DOCTYPE html>\n<html>\n <meta charset=\"utf-8\" />\n <body>\n <h1>{{payload.websocketUrl}}</h1>\n <canvas id=\"video-canvas\"></canvas>\n\n <script>\nconst init = () => {\n const videoCanvas = document.getElementById('video-canvas')\n const url = `{{{payload.websocketUrl}}}`\n new JSMpeg.Player(url, { canvas: videoCanvas })\n}\n\nif (document.readyState === 'loading') {\n document.addEventListener('DOMContentLoaded', init)\n} else {\n setTimeout(init, 500)\n}\n</script>\n <script>\n var JSMpeg={Player:null,VideoElement:null,BitBuffer:null,Source:{},Demuxer
import * as cocoSsd from "@tensorflow-models/coco-ssd";

// The <img> element we want to run object detection on.
const image = document.getElementById("image")

// Load the pretrained COCO-SSD model, run detection on the image, and log
// the predictions (each has a class name, a confidence score, and a bbox).
const modelPromise = cocoSsd.load()
modelPromise
  .then((model) => model.detect(image))
  .then((predictions) => console.log(predictions))
// Run object detection on the current video frame, hand the predictions to
// the renderer, and schedule the next detection pass for the browser's next
// animation frame — giving a continuous real-time detection loop.
function detectFrame() {
  const handlePredictions = (predictions) => {
    renderOurPredictions(predictions)
    requestAnimationFrame(detectFrame)
  }
  model.detect(video).then(handlePredictions)
}
// A coco-ssd prediction's bbox is [x, y, width, height] — unpack it directly.
const [x, y, width, height] = prediction.bbox;

// Draw the bounding box outline onto the overlay canvas.
const canvas = document.getElementById("canvas");
const ctx = canvas.getContext("2d");
ctx.strokeRect(x, y, width, height);
// Identifier of the Visual Recognition classifier whose on-device
// Core ML model should be refreshed.
let classifierID = "your-classifier-id"

// Error handler passed to the SDK: just log whatever went wrong.
let failure: (Error) -> Void = { print($0) }

// Download the latest compiled model for this classifier and cache it
// locally; the trailing closure runs on success.
visualRecognition.updateLocalModel(classifierID: classifierID, failure: failure) {
    print("model updated")
}
// Classifier to run and the image to classify.
let classifierID = "your-classifier-id"

// Error handler passed to the SDK: just log whatever went wrong.
let failure: (Error) -> Void = { print($0) }

// Image loaded from the app bundle by name.
let image = UIImage(named: "your-image-filename")

// Classify the image entirely on-device using the locally cached Core ML
// model; the trailing closure receives the classification results.
visualRecognition.classifyWithLocalModel(image: image, classifierIDs: [classifierID], failure: failure) { classifiedImages in
    print(classifiedImages)
}
<link rel="import" href="../core-icon-button/core-icon-button.html">
<link rel="import" href="../core-toolbar/core-toolbar.html">
<link rel="import" href="../core-header-panel/core-header-panel.html">
<polymer-element name="my-element">
<template>
<style>
:host {
position: absolute;
// Classification method.
func classify(_ image: CGImage, completion: @escaping ([VNClassificationObservation]) -> Void) {
DispatchQueue.global(qos: .background).async {
// Initialize the coreML vision model, you can also use VGG16().model, or any other model that takes an image.
guard let vnCoreModel = try? VNCoreMLModel(for: Inceptionv3().model) else { return }
// Build the coreML vision request.
let request = VNCoreMLRequest(model: vnCoreModel) { (request, error) in
// We get an array of VNClassificationObservations back
// This has the fields "confidence", which is the score