// Started work on this, didn't finish.
// p5 code goes in this file

// include this to use autofill in vscode
// see https://stackoverflow.com/questions/30136319/what-is-reference-path-in-vscode
/// <reference path="../shared/p5.d/p5.d.ts" />
/// <reference path="../shared/p5.d/p5.global-mode.d.ts" />
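
// Assumption (annotation, not part of the original sketch): this file expects
// index.html to load p5.js, ml5.js, and the WebRTCPeerClient helper script,
// none of which are included in this gist.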

// peer variables
let startPeer;

// posenet variables
let video;
let poseNet;

// hold poses
let myPose = [];
let myPrevPose;
let partnerPose;
let partnerPrevPose;
let myFirstPose = true;
let partnerFirstPose = true;

// what example are we running?
let state = 1;

// use for developing without partner
let mirror = false;

const colors = {
  x: 'rgba(0, 63, 84, 0.5)',
  y: 'rgba(49, 128, 144, 0.5)',
  z: 'rgba(82, 100, 118, 0.5)',
};

function setup() {
  // create p5 canvas
  createCanvas(640, 480);

  // create webcam capture for posenet
  video = createCapture(VIDEO);
  video.size(width, height);
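
  // Annotation on the options below (my summary of the ml5 PoseNet options,
  // not the original author's): outputStride and inputResolution trade speed
  // for accuracy, multiplier is the MobileNetV1 depth multiplier, and
  // detectionType 'single' tracks one body at a time.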
  const options = {
    architecture: 'MobileNetV1',
    imageScaleFactor: 0.3,
    outputStride: 16,
    flipHorizontal: false,
    minConfidence: 0.5,
    maxPoseDetections: 5,
    scoreThreshold: 0.5,
    nmsRadius: 20,
    detectionType: 'single',
    inputResolution: 513,
    multiplier: 0.75,
    quantBytes: 2,
  };

  // Create a new poseNet method with a single detection
  poseNet = ml5.poseNet(video, options, modelReady);

  // This sets up an event that fills the global variable "myPose"
  // with an array every time new poses are detected
  poseNet.on('pose', function (results) {
    myPose = results;
    // only runs once at beginning of sketch
    if (myFirstPose) {
      myPrevPose = myPose;
      myFirstPose = false;
    }
  });

  // Hide the video element, and just show the canvas
  video.hide();

  // start socket client automatically on load
  // by default it connects to http://localhost:80
  WebRTCPeerClient.initSocketClient();

  // to connect to server remotely pass the ngrok address
  // WebRTCPeerClient.initSocketClient('http://f54b8ef193dd.ngrok.io');

  // start the peer client
  WebRTCPeerClient.initPeerClient();
}

function modelReady() {
  console.log('Model Loaded');
}

function update() {
  // send my pose over peer
  WebRTCPeerClient.sendData(myPose);

  // mirror is for testing without a partner (branch left unfinished)
  if (mirror) {
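    // A minimal sketch of what the mirror branch could do (assumption,
    // not the original author's code): reuse your own pose as the
    // partner's so the sketch can be developed solo.
    // partnerPose = myPose;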
  } else {
    partnerPose = WebRTCPeerClient.getData();
  }

  // only runs once at beginning of sketch
  if (partnerFirstPose) {
    partnerPrevPose = partnerPose;
    partnerFirstPose = false;
  }
}

function draw() {
  // only proceed if the peer is started
  if (!WebRTCPeerClient.isPeerStarted()) {
    return;
  }

  update();
  background(255);
  // draw my pose
  drawKeypoints(myPose, myPrevPose, colors.x, 20);
  // drawSkeleton(myPose, colors.x, 20);

  // make sure we have a partner pose before drawing
  if (partnerPose !== null) {
    // drawKeypoints(partnerPose, partnerPrevPose, colors.y, 0);
    // drawSkeleton(partnerPose, colors.y, 0);
  }
  myPrevPose = myPose;
  partnerPrevPose = partnerPose;

  stroke(0);
  text(getFrameRate(), 10, 10);
}

// A function to draw ellipses over the detected keypoints
function drawKeypoints(poses, prevPoses, clr, offset) {
  // Loop through all the poses detected
  for (let i = 0; i < poses.length; i++) {
    // For each pose detected, loop through all the keypoints
    let pose = poses[i].pose;
    let prevPose = prevPoses[i].pose;
    for (let j = 0; j < pose.keypoints.length; j++) {
      // A keypoint is an object describing a body part (like rightArm or leftShoulder)
      let keypoint = pose.keypoints[j];
      let prevKeypoint = prevPose.keypoints[j];
      // Only draw an ellipse if the pose probability is bigger than 0.2
      if (keypoint.score > 0.2) {
        let smoothedPoint = {};
        smoothedPoint.x =
          1 * keypoint.position.x + 0 * prevKeypoint.position.x;
        smoothedPoint.y =
          1 * keypoint.position.y + 0 * prevKeypoint.position.y;
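        // Annotation (not the original author's): the 1 and 0 above are
        // linear-interpolation weights between the current and previous
        // keypoint, so as written no smoothing is applied. Weights like
        // 0.8 * current + 0.2 * previous would blend in the previous frame.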
        fill(clr);
        noStroke();
        ellipse(smoothedPoint.x + offset, smoothedPoint.y, 10, 10);
      }
    }
  }
}

// A function to draw the skeletons
function drawSkeleton(poses, clr, offset) {
  // Loop through all the skeletons detected
  for (let i = 0; i < poses.length; i++) {
    let skeleton = poses[i].skeleton;
    // For every skeleton, loop through all body connections
    for (let j = 0; j < skeleton.length; j++) {
      let partA = skeleton[j][0];
      let partB = skeleton[j][1];
      stroke(clr);
      line(
        partA.position.x + offset,
        partA.position.y,
        partB.position.x + offset,
        partB.position.y,
      );
    }
  }
}