# Gist by @mkocabas, created September 23, 2018 11:07
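"""
Sanity check for the Human3.6M camera geometry: load the 3D joint annotations and the
camera parameters for one subject/action, project the 3D pose into two camera views with
cv2.projectPoints, draw the skeletons on the corresponding frames, triangulate the two
2D poses back to 3D with cv2.triangulatePoints, and visualize the result next to the
images. Helpers for drawing epipolar lines between the two views are also included.
"""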
import numpy as np
import cv2
import src.data_utils as data_utils
from src.data_utils import load_data, project_to_cameras
import os
import matplotlib.pyplot as plt
from src import cameras, viz

def drawlines(img1, img2, lines, pts1, pts2):
    ''' img1 - image on which we draw the epilines for the points in img2
        lines - corresponding epilines '''
    r, c = img1.shape
    img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for r, pt1, pt2 in zip(lines, pts1, pts2):
        color = tuple(np.random.randint(0, 255, 3).tolist())
        x0, y0 = map(int, [0, -r[2] / r[1]])
        x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
        img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
        img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
    return img1, img2

def drawskeleton(img1, img2, kp1, kp2):
    kp1 = kp1.astype(int)
    kp2 = kp2.astype(int)
    # Limb connectivity of the 32-joint Human3.6M skeleton: joint I[j] connects to joint J[j]
    I = np.array([1, 2, 3, 1, 7, 8, 1, 13, 14, 14, 18, 19, 14, 26, 27]) - 1   # start points
    J = np.array([2, 3, 4, 7, 8, 9, 13, 14, 16, 18, 19, 20, 26, 27, 28]) - 1  # end points
    LR = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)  # which of the two colors each limb gets
    lcolor, rcolor = (255, 0, 0), (0, 0, 255)
    for j in range(len(I)):
        img1 = cv2.line(img1, tuple(kp1[I[j]]), tuple(kp1[J[j]]), lcolor if LR[j] else rcolor, 3)
        img2 = cv2.line(img2, tuple(kp2[I[j]]), tuple(kp2[J[j]]), lcolor if LR[j] else rcolor, 3)
    return img1, img2

def read_data(subject, action, root_dir='/media/muhammed/Other/RESEARCH/datasets/Human3.6M/ours/images'):
    # cameras = ['54138969', '55011271', '58860488', '60457274']
    rcams = cameras.load_cameras('data/h36m/cameras.h5', [subject])
    actions = data_utils.define_actions(action)
    print(rcams.keys())
    points3D = load_data('data/h36m', [subject], actions, dim=3)
    points2D = project_to_cameras(points3D, rcams)
    # print(points3D.keys())
    # print(points2D.keys())
    cam_ids = [1, 2, 3, 4]
    cam_params = [rcams[(subject, x)] for x in cam_ids]  # R, T, f, c, k, p, name
    keys2D = [(subject, action, action + '.' + str(x[-1]) + '.h5') for x in cam_params]
    temp = [points2D[x].reshape(points2D[x].shape[0], -1, 2) for x in keys2D]
    points2D = temp
    key3D = (subject, action, action + '.h5')
    temp = points3D[key3D].reshape(points3D[key3D].shape[0], -1, 3)
    points3D = temp
    folders = [os.path.join(root_dir, 'S' + str(subject), action + '.' + str(x[-1])) for x in cam_params]
    print(folders)
    images = []
    for folder in folders:
        images.append([os.path.join(folder, x) for x in sorted(os.listdir(folder))])
    return images, points3D, points2D, cam_params

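# read_data() above returns, per camera view (Human3.6M has 4 cameras):
#   images[c]   : sorted list of frame file paths for camera c
#   points2D[c] : projected 2D joints with shape (n_frames, n_joints, 2)
# together with points3D of shape (n_frames, n_joints, 3) and
# cam_params[c] = (R, T, f, c, k, p, name) as loaded by cameras.load_cameras.
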
def get_intrinsic_matrix(params):
    # f and c may be stored as (2, 1) arrays; flatten before unpacking
    fx, fy = np.ravel(params[2])
    cx, cy = np.ravel(params[3])
    K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]).astype(np.double)
    return K

def get_projection_matrix(params):
    K = get_intrinsic_matrix(params)
    T = get_tvec(params)
    return np.dot(K, np.concatenate((params[0], T), axis=1))

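# The matrix above is P = K @ [R | t] with t = -R @ T, which assumes the Human3.6M
# convention that T is the camera centre in world coordinates (x_cam = R @ (X_world - T)).
# A minimal sketch of how such a 3x4 matrix projects world points homogeneously, i.e. the
# model that cv2.triangulatePoints inverts (hypothetical helper, not used elsewhere here):
def project_with_matrix(proj, X):
    """Project (N, 3) world points with a 3x4 projection matrix; no lens distortion."""
    X_h = np.hstack((X, np.ones((X.shape[0], 1))))  # to homogeneous coordinates, (N, 4)
    x = proj.dot(X_h.T)                             # homogeneous image points, (3, N)
    return (x[:2] / x[2]).T                         # perspective divide -> (N, 2) pixels
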
def get_tvec(params, *args):
    if isinstance(params, tuple):
        # called with the full (R, T, f, c, k, p, name) camera tuple
        return np.dot(params[0], np.negative(params[1]))
    else:
        # called as get_tvec(R, T)
        return np.dot(params, np.negative(args[0]))

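# get_dist_coeffs packs the Human3.6M radial k = (k1, k2, k3) and tangential p = (p1, p2)
# distortion coefficients into the order OpenCV expects for distCoeffs: (k1, k2, p1, p2, k3).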
def get_dist_coeffs(params):
    return np.array([params[4][0], params[4][1], params[5][0], params[5][1], params[4][2]])

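# check_image crops the 1002-row Human3.6M frames to 1000 rows so that every frame has
# the same height (presumably to keep the image size consistent with the calibration).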
def check_image(img):
    if img.shape == (1002, 1000, 3):
        img = img[:1000, :, :]
    elif img.shape == (1002, 1000):
        img = img[:1000, :]
    return img

def epilines(img1, img2, kp1, kp2):
    pts1 = np.int32(kp1)
    pts2 = np.int32(kp2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
    # Keep only the inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    # Find epilines corresponding to points in the right image (second image)
    # and draw them on the left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)
    # Find epilines corresponding to points in the left image (first image)
    # and draw them on the right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
    img5, img3 = drawskeleton(img5, img3, kp1, kp2)
    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img3)
    plt.draw()
    plt.pause(1)

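# Since the camera intrinsics and extrinsics are known, the fundamental matrix could also
# be computed analytically instead of being estimated with FM_LMEDS as above. A minimal
# sketch (hypothetical helper, not used below; assumes undistorted pixels and the same
# world-to-camera convention as get_tvec, i.e. x_cam = R @ X_world + t):
def fundamental_from_cameras(K1, R1, t1, K2, R2, t2):
    """F such that x2^T F x1 = 0 for corresponding homogeneous pixels x1, x2."""
    R_rel = R2.dot(R1.T)                  # rotation from camera-1 frame to camera-2 frame
    t_rel = (t2 - R_rel.dot(t1)).ravel()  # translation from camera-1 frame to camera-2 frame
    t_x = np.array([[0, -t_rel[2], t_rel[1]],
                    [t_rel[2], 0, -t_rel[0]],
                    [-t_rel[1], t_rel[0], 0]])  # skew-symmetric cross-product matrix
    E = t_x.dot(R_rel)                    # essential matrix
    return np.linalg.inv(K2).T.dot(E).dot(np.linalg.inv(K1))
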
def useful_comments():
    """Scratchpad of alternative projection snippets, kept for reference."""
    # img1 = check_image(cv2.imread(images_1[i], 0))
    # img2 = check_image(cv2.imread(images_2[i], 0))
    # R, T, f, c, k, p, name = cam_params[id_1]
    # P = np.hstack((P, np.ones((32, 1))))
    # kp1 = np.dot(proj1, P.T)
    # kp1 = (kp1[:2, :] / kp1[2, :]).T
    # print(P.reshape(-1, 1).shape)
    # P, R, T, f, c, k, p = [P] + list(cam_params[id_1][:-1])
    # kp1 = cameras.project_point_radial(P, R, T, f, c, k, p)[0]
    #
    # P, R, T, f, c, k, p = [P] + list(cam_params[id_2][:-1])
    # kp2 = cameras.project_point_radial(P, R, T, f, c, k, p)[0]
    # P_cam_1 = cameras.world_to_camera_frame(P, cam_params[id_1][0], cam_params[id_1][1])
    # P_cam_2 = cameras.world_to_camera_frame(P, cam_params[id_2][0], cam_params[id_2][1])
    # kp1, kp2 = kp1.reshape(-1, 2), kp2.reshape(-1, 2)
    # for pr in cam_params[id_1][:-1]:
    #     print(pr.shape)
    # kp1 = kp1.reshape(2, -1)
    # kp1 = (f * kp1) + c
    # draw3D(P_cam_1)
    return 0

# def draw3D(channels, lcolor="#3498db", rcolor="#e74c3c", add_labels=False, global_coords=False, time=1):
#     """
#     Visualize a 3d skeleton
#
#     Args
#         channels: 96x1 vector. The pose to plot.
#         ax: matplotlib 3d axis to draw on
#         lcolor: color for left part of the body
#         rcolor: color for right part of the body
#         add_labels: whether to add coordinate labels
#     Returns
#         Nothing. Draws on ax.
#     """
#
#     ax = plt.subplot(projection='3d')
#
#     assert channels.size == len(
#         data_utils.H36M_NAMES) * 3, "channels should have 96 entries, it has %d instead" % channels.size
#     vals = np.reshape(channels, (len(data_utils.H36M_NAMES), -1))
#
#     I = np.array([1, 2, 3, 1, 7, 8, 1, 13, 14, 15, 14, 18, 19, 14, 26, 27]) - 1  # start points
#     J = np.array([2, 3, 4, 7, 8, 9, 13, 14, 15, 16, 18, 19, 20, 26, 27, 28]) - 1  # end points
#     LR = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)
#
#     # Make connection matrix
#     for i in np.arange(len(I)):
#         x, y, z = [np.array([vals[I[i], j], vals[J[i], j]]) for j in range(3)]
#         ax.plot(x, y, z, lw=2, c=lcolor if LR[i] else rcolor)
#
#     if global_coords:
#         WIDTH = 3000 / 2.
#         HEIGHT = 4000 / 2.
#         DEPTH = 2000  # space around the subject
#         xroot, yroot, zroot = 0, 0, 0
#         ax.set_xlim3d([-WIDTH + xroot, WIDTH + xroot])
#         ax.set_zlim3d([0, DEPTH + zroot])
#         ax.set_ylim3d([-HEIGHT + yroot, HEIGHT + yroot])
#     else:
#         RADIUS = 750  # space around the subject
#         xroot, yroot, zroot = vals[0, 0], vals[0, 1], vals[0, 2]
#         ax.set_xlim3d([-RADIUS + xroot, RADIUS + xroot])
#         ax.set_zlim3d([-RADIUS + zroot, RADIUS + zroot])
#         ax.set_ylim3d([-RADIUS + yroot, RADIUS + yroot])
#
#     if add_labels:
#         ax.set_xlabel("x")
#         ax.set_ylabel("y")
#         ax.set_zlabel("z")
#
#     plt.gcf()
#     plt.draw()
#     plt.pause(time)

def main():
    id_1, id_2 = 3, 1
    images, points3D, points2D, cam_params = read_data(subject=11, action='Sitting')
    images_1, p2d_1 = images[id_1], points2D[id_1]
    images_2, p2d_2 = images[id_2], points2D[id_2]
    K1 = get_intrinsic_matrix(cam_params[id_1])
    K2 = get_intrinsic_matrix(cam_params[id_2])
    proj1 = get_projection_matrix(cam_params[id_1])
    proj2 = get_projection_matrix(cam_params[id_2])
    fig = plt.figure(figsize=(9.6, 5.4))
    plt.axis('off')
    index = 0
    for i in np.random.randint(0, 1000, 100):  # range(0, 100):
        img1 = check_image(cv2.cvtColor(cv2.imread(images_1[i]), cv2.COLOR_BGR2RGB))
        img2 = check_image(cv2.cvtColor(cv2.imread(images_2[i]), cv2.COLOR_BGR2RGB))
        # kp1, kp2 = p2d_1[i], p2d_2[i]
        distCoeffs1 = get_dist_coeffs(cam_params[id_1])
        distCoeffs2 = get_dist_coeffs(cam_params[id_2])
        P = points3D[i]
        # cv2.projectPoints also accepts a 3x3 rotation matrix for rvec and converts it internally
        R, T, f, c, k, p, name = cam_params[id_1]
        kp1, _ = cv2.projectPoints(objectPoints=P,
                                   rvec=R,
                                   tvec=get_tvec(cam_params[id_1]),
                                   cameraMatrix=K1,
                                   distCoeffs=distCoeffs1)
        R, T, f, c, k, p, name = cam_params[id_2]
        kp2, _ = cv2.projectPoints(objectPoints=P,
                                   rvec=R,
                                   tvec=get_tvec(cam_params[id_2]),
                                   cameraMatrix=K2,
                                   distCoeffs=distCoeffs2)
        drawskeleton(img1, img2, kp1.reshape(-1, 2), kp2.reshape(-1, 2))
        # triangulatePoints expects 2xN arrays of corresponding points; note that kp1/kp2
        # include lens distortion while proj1/proj2 do not, so the triangulation is approximate
        project3D = cv2.triangulatePoints(proj1, proj2, kp1.reshape(-1, 2).T, kp2.reshape(-1, 2).T)
        P3 = project3D[:3, :] / project3D[3, :]
        # P3 = cameras.world_to_camera_frame(P3.reshape(-1,3), R, T)
        ax1 = plt.subplot(131)
        ax1.imshow(img1)
        ax1.axis('off')
        # Plot 3d gt
        ax2 = plt.subplot(132)
        ax2.imshow(img2)
        ax2.axis('off')
        # Plot 3d predictions
        ax3 = plt.subplot(133, projection='3d')
        # show3Dpose expects the pose as a 96-element (32x3) vector (cf. the commented draw3D
        # above), hence the transpose of the (3, 32) triangulation result
        viz.show3Dpose(P3.T, ax3, lcolor="#ff0000", rcolor="#0000ff", add_labels=True)
        plt.draw()
        plt.pause(1)


if __name__ == '__main__':
    main()