Created
December 28, 2022 23:41
-
-
Save scottyob/331027d9f06431d32395281d47b5f27b to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import cv2 | |
import cv2.aruco as aruco | |
import numpy as np | |
from enum import Enum | |
from math import cos, sin | |
import pandas as pd | |
# Open the video capture device (macOS Continuity Camera).
# Device indices are not stable: index 1 is usually the Continuity Camera,
# but it sometimes maps to the internal MacBook camera instead, so the
# wrong camera is detected by its frame height and we fall back to index 0.
vid = cv2.VideoCapture(1)

# Block until the camera delivers its first frame.
ret, image = vid.read()
while not ret:
    ret, image = vid.read()

# len(image) == image.shape[0], the frame height in rows; the internal
# camera delivers fewer than 1080 rows, so switch to the other device.
if len(image) < 1080:
    print("Switching cameras")
    vid.release()
    vid = cv2.VideoCapture(0)
    # BUG FIX: re-read a frame from the newly opened camera; the original
    # kept the stale frame from the released device, so the size printed
    # below described the wrong camera.
    ret, image = vid.read()
    while not ret:
        ret, image = vid.read()
print(f"Length of camera: {len(image)}")
# ArUco marker detection setup.
# BUG FIX: cv2.aruco.Dictionary_get was removed in OpenCV 4.7;
# getPredefinedDictionary returns the same dictionary and exists in both
# the legacy and the current aruco API.
# NOTE(review): DetectorParameters_create() is also legacy (pre-4.7);
# newer builds use cv2.aruco.DetectorParameters() with
# cv2.aruco.ArucoDetector -- confirm the installed OpenCV version.
arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL)
arucoParams = cv2.aruco.DetectorParameters_create()

# Camera intrinsics produced by the separate calibration script.
# Camera matrix layout: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] (pixels).
matrix_coefficients = np.array([
    [1386.8525937153158, 0.0, 973.8945339231749],
    [0.0, 1379.7927191278873, 523.1697254477391],
    [0.0, 0.0, 1.0],
])
# Distortion coefficients in OpenCV order: (k1, k2, p1, p2, k3).
distortion_coefficients = np.array([
    [0.09192289627057688, -0.15609565505249542, 0.0034213829390910915,
     0.00017787158723953325, 0.057816966357919074],
])
class OriginLocation(Enum):
    """Which point of an ArUco marker serves as its pose origin."""

    CENTER = 1        # default origin returned by pose estimation
    BOTTOM_RIGHT = 2  # shift origin to the marker's bottom-right corner
    BOTTOM_LEFT = 3   # shift origin to the marker's bottom-left corner
def getTranslationMatrix(tvec):
    """Return a 4x4 homogeneous transform that translates by *tvec*.

    tvec -- length-3 translation vector (any sequence numpy accepts)
    """
    result = np.eye(4)
    result[:3, 3] = tvec
    return result
def getTransformMatrix(rvec, tvec):
    """Build a 4x4 rigid-body transform from a Rodrigues rotation
    vector *rvec* and a translation vector *tvec*.

    The translation fills column 3, and the 3x3 rotation matrix produced
    by cv2.Rodrigues fills the upper-left block.
    """
    transform = getTranslationMatrix(tvec)
    rotation, _jacobian = cv2.Rodrigues(rvec)
    transform[0:3, 0:3] = rotation
    return transform
def relativeTransformMatrix(rotation, translation):
    """Compose a 4x4 homogeneous transform from Euler angles and an offset.

    rotation -- (rx, ry, rz) rotation angles in radians about X, Y, Z
    translation -- (dx, dy, dz) translation applied before the rotations

    The result is Rz @ Ry @ Rx @ T, matching the original composition
    order np.dot(Rz, np.dot(Ry, np.dot(Rx, T))).
    """
    rx, ry, rz = rotation
    dx, dy, dz = translation

    cx, sx = cos(rx), sin(rx)
    cy, sy = cos(ry), sin(ry)
    cz, sz = cos(rz), sin(rz)

    translate = np.array([
        [1.0, 0.0, 0.0, dx],
        [0.0, 1.0, 0.0, dy],
        [0.0, 0.0, 1.0, dz],
        [0.0, 0.0, 0.0, 1.0],
    ])
    rotate_x = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, cx, -sx, 0.0],
        [0.0, sx, cx, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    rotate_y = np.array([
        [cy, 0.0, sy, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [-sy, 0.0, cy, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    rotate_z = np.array([
        [cz, -sz, 0.0, 0.0],
        [sz, cz, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    return rotate_z @ rotate_y @ rotate_x @ translate
def to_vectors(image, camera_matrix, distortion_matrix, corners, edge_length_cm,
               origin_location=OriginLocation.CENTER):
    """Estimate the pose of a single detected ArUco marker.

    image -- frame the marker was detected in; drawn on as a side effect
    camera_matrix -- 3x3 camera intrinsics matrix
    distortion_matrix -- lens distortion coefficients
    corners -- corner array for one marker, as returned by detectMarkers
    edge_length_cm -- physical edge length of the marker, in cm
    origin_location -- which marker point to report the pose of

    Returns (rvec, tvec) for CENTER, or (rotation_matrix, translation)
    for a corner origin.  Raises ValueError for an unsupported
    origin_location (the original code hit an UnboundLocalError instead).
    """
    # Estimate pose of each marker and return the values rvec and tvec
    # (distinct from the camera coefficients).
    rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
        corners, edge_length_cm, camera_matrix, distortion_matrix)
    if origin_location == OriginLocation.CENTER:
        return (rvec, tvec)
    # Offset from the marker center to the requested corner, in the
    # marker's own coordinate frame (half an edge along each axis).
    if origin_location == OriginLocation.BOTTOM_RIGHT:
        transform = [edge_length_cm / 2, -edge_length_cm / 2, 0]
    elif origin_location == OriginLocation.BOTTOM_LEFT:
        transform = [-edge_length_cm / 2, -edge_length_cm / 2, 0]
    else:
        raise ValueError(f"Unsupported origin location: {origin_location}")
    transformMatrix = getTransformMatrix(rvec, tvec)
    # Get the transform matrix we want to apply to the obtained marker position
    mat = relativeTransformMatrix([0, 0, 0], transform)
    # Now apply the transform to the original matrix by simply dot multiplying them
    transformMatrix = np.dot(transformMatrix, mat)
    rmat = transformMatrix[:3, :3]
    tmat = transformMatrix[:3, 3:]
    # Draw the axes, 5 cm long.
    # BUG FIX: draw using the camera parameters passed to this function;
    # the original referenced the module-level globals, silently ignoring
    # the camera_matrix/distortion_matrix arguments.
    cv2.drawFrameAxes(image, camera_matrix, distortion_matrix, rmat, tmat, 5)
    return (rmat, tmat)
# Accumulate one measured distance per frame.  Building the DataFrame once
# at the end replaces DataFrame.append (removed in pandas 2.0) and avoids
# re-copying the frame on every iteration.
lengths = []

# Marker IDs used by this rig:
#   12 -- reference marker (10 cm edge, origin at its bottom-right corner)
#   11 -- location marker (4 cm edge, origin at its bottom-left corner)
REF_ID = 12
LOCATION_ID = 11

while True:
    # Get a frame from the camera
    ret, image = vid.read()
    if not ret:
        continue
    # NOTE(review): the original called cv2.flip(image, 1) here and
    # discarded the return value -- a no-op -- so the dead call was
    # removed.  If a mirrored preview is wanted, flip a display-only
    # copy: detecting on a mirrored frame would break marker decoding.

    # Exit on 'q'
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

    # Detect the markers in the image
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        image, arucoDict, parameters=arucoParams)
    if ids is None:  # No markers found by the detector
        cv2.imshow('img', image)
        continue
    aruco.drawDetectedMarkers(image, corners, ids)  # Draw a square around the markers

    # detectMarkers returns ids as an (N, 1) array; flatten to plain ints.
    ids = [marker_id[0] for marker_id in ids.tolist()]
    ref_mat = None
    loc_mat = None
    if REF_ID in ids:
        ref_corners = corners[ids.index(REF_ID)]
        _, ref_mat = to_vectors(image, matrix_coefficients, distortion_coefficients,
                                ref_corners, 10, OriginLocation.BOTTOM_RIGHT)
    if LOCATION_ID in ids:
        loc_corners = corners[ids.index(LOCATION_ID)]
        _, loc_mat = to_vectors(image, matrix_coefficients, distortion_coefficients,
                                loc_corners, 4, OriginLocation.BOTTOM_LEFT)

    # BUG FIX: the original also required ref_mat.all()/loc_mat.all(),
    # which silently skipped any frame where a translation component was
    # exactly zero; presence of both poses is the correct condition.
    if ref_mat is not None and loc_mat is not None:
        length = np.linalg.norm(loc_mat - ref_mat)
        print(length)
        lengths.append(length)

    # Show image on display
    cv2.imshow('img', image)

vid.release()
cv2.destroyAllWindows()
df = pd.DataFrame(lengths, columns=['length'])
print(df.describe())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment