@sneakers-the-rat
Created September 27, 2018 05:45
play/render elliot's pupil fit
# renamed files to 'vid.avi' and 'points.csv'
# deleted first row of points.csv
# call this and use -p 'y'/'n', -r 'y'/'n' to control playing and rendering the video
# or do ./elliot_pupil.py --help
# you could comment out the skvideo and tqdm imports and objects if ya don't want to install/render.
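# example invocations (illustrative; with the defaults below, running with no flags plays but doesn't render,
# and rendered output goes to base_dir + 'vid_draw.mp4'):
#   ./elliot_pupil.py                      # play the overlay
#   ./elliot_pupil.py -p n -r y            # render without playing
#   ./elliot_pupil.py --play y --render y  # play and render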
import cv2
import pandas as pd
import numpy as np
from skimage import draw, measure
from skvideo import io
from itertools import product
import argparse
from tqdm import trange

def draw_points(frame, x, y, color):
    # paint a small (4x4 px) square marker at (x, y), skipping pixels that fall outside the frame
    point_adds = product(range(-2, 2), range(-2, 2))
    for pt in point_adds:
        try:
            frame[x + pt[0], y + pt[1]] = color
        except IndexError:
            pass
    return frame

def main():
    # load points, use first two rows as header
    pts = pd.read_csv(base_dir + 'points.csv', header=[0, 1])
    # rename columns, joining the multiindex
    pts.columns = [' '.join(col).strip() for col in pts.columns.values]
    # melt & clean dataframe into long format
    pts = pts.melt(id_vars='frame')
    # split 'variable' (e.g. 'point x') into the point name and the coordinate type
    split_cols = pts['variable'].str.split(' ', n=1, expand=True)
    pts['point'] = split_cols[0]
    pts['type'] = split_cols[1]
    n_pts = len(pts.point.unique())
    pts.drop('variable', axis=1, inplace=True)
    # pivot the df to two indices, frame, point, and the likelihood and coords as columns
    pts = pd.pivot_table(pts, values='value', index=['frame', 'point'], columns='type')
    frame_points = pts.groupby('frame')
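
    # assumed points.csv layout for the reshaping above (inferred from the code;
    # point names are illustrative, not from the original data):
    #
    #   frame, pt1, pt1, pt1,        pt2, pt2, pt2,        ...
    #        , x,   y,   likelihood, x,   y,   likelihood, ...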

    # make color gradient, one color per tracked point
    colors = np.column_stack((np.linspace(255, 0, num=n_pts, dtype=int),
                              np.linspace(0, 255, num=n_pts, dtype=int),
                              np.zeros(n_pts, dtype=int)))
    if render:
        # make writer
        out_fn = base_dir + 'vid_draw.mp4'
        writer = io.FFmpegWriter(out_fn)

    # open video and play
    vid = cv2.VideoCapture(base_dir + 'vid.avi')
    if play:
        cv2.namedWindow('play', flags=cv2.WINDOW_NORMAL)

    n_frame = 0
    thetas = np.linspace(-np.pi, np.pi, 50)
    emod = measure.EllipseModel()

    # iter frames, draw points
    total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    for i in trange(total_frames):
        # if we try to quit (enter/return key), quit nicely
        k = cv2.waitKey(1) & 0xFF
        if k == ord('\r'):
            break

        # grab frame
        ret, frame = vid.read()
        if not ret:
            break

        # draw points
        n_frame = int(vid.get(cv2.CAP_PROP_POS_FRAMES))
        # first the ellipse fit to this frame's points, stacked as (row, col) coordinates
        rows = frame_points.get_group(n_frame)
        xy = np.column_stack((rows['y'], rows['x']))
        if emod.estimate(xy):
            e_points = emod.predict_xy(thetas).astype(int)
            for e_pts in e_points:
                frame = draw_points(frame, e_pts[0], e_pts[1], [0, 0, 255])

        # and then the points themselves
        for color, (idx, row) in zip(colors, rows.iterrows()):
            frame = draw_points(frame, int(row['y']), int(row['x']), color)

        if play:
            cv2.imshow('play', frame)
        if render:
            writer.writeFrame(frame)

    if play:
        cv2.destroyAllWindows()
    if render:
        writer.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Play or render a pupil video')
    parser.add_argument('-p', '--play', help="Play video? (y/n)")
    parser.add_argument('-r', '--render', help="Render video? (y/n)")
    args = parser.parse_args()

    play = True
    render = False
    if args.play and args.play.lower() == 'n':
        play = False
    if args.render and args.render.lower() == 'y':
        render = True

    base_dir = "/Users/jonny/elliott_pupil/"
    main()