@texastony
Forked from edfungus/pupil.py
Created March 1, 2018 00:01
Pupil Detection with Python and OpenCV
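Both copies of the code assume a particular location for the Haar cascade file: the plain script loads 'haarcascade_eye.xml' from the working directory, while the notebook hard-codes a Homebrew path. If OpenCV was installed from the opencv-python wheel, the bundled cascades can usually be resolved via cv2.data.haarcascades; a minimal sketch, assuming that install method (not part of the original gist):

import os
import cv2

# Assumption: OpenCV installed with `pip install opencv-python`,
# which ships the Haar cascade XML files and exposes their directory.
cascade_path = os.path.join(cv2.data.haarcascades, 'haarcascade_eye.xml')
eye_cascade = cv2.CascadeClassifier(cascade_path)
assert not eye_cascade.empty(), 'cascade failed to load; check the path'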
#Identify pupils. Based on beta 1
import numpy as np
import cv2
import time

cap = cv2.VideoCapture(0)  #640,480
w = 640
h = 480

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        #downsample
        #frameD = cv2.pyrDown(cv2.pyrDown(frame))
        #frameDBW = cv2.cvtColor(frameD,cv2.COLOR_RGB2GRAY)
        #detect eyes (capture frames are BGR, so convert with COLOR_BGR2GRAY)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #loading the classifier once, before the loop, would be faster
        faces = cv2.CascadeClassifier('haarcascade_eye.xml')
        detected = faces.detectMultiScale(frame, 1.3, 5)
        #faces = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        #detected2 = faces.detectMultiScale(frameDBW, 1.3, 5)
        pupilFrame = frame
        pupilO = frame
        windowClose = np.ones((5, 5), np.uint8)
        windowOpen = np.ones((2, 2), np.uint8)
        windowErode = np.ones((2, 2), np.uint8)

        #draw square
        for (x, y, w, h) in detected:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            cv2.line(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            cv2.line(frame, (x + w, y), (x, y + h), (0, 0, 255), 1)
            #slice indices must be integers, hence int(h*.25)
            pupilFrame = cv2.equalizeHist(frame[y + int(h * .25):(y + h), x:(x + w)])
            pupilO = pupilFrame
            ret, pupilFrame = cv2.threshold(pupilFrame, 55, 255, cv2.THRESH_BINARY)  #50 ..nothing, 70 is better
            pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE, windowClose)
            pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE, windowErode)
            pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN, windowOpen)

            #so above we do image processing to get the pupil..
            #now we find the biggest blob and get the centroid
            threshold = cv2.inRange(pupilFrame, 250, 255)  #get the blobs
            contours, hierarchy = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            contours = list(contours)  #make mutable so the del calls below work

            #if there are 3 or more blobs, delete the biggest and delete the left most for the right eye
            #if there are 2 blobs, take the second largest
            #if there are 1 or fewer blobs, do nothing
            if len(contours) >= 2:
                #find biggest blob
                maxArea = 0
                MAindex = 0  #index of the unwanted picture-frame blob
                distanceX = []  #delete the left most (for right eye)
                currentIndex = 0
                for cnt in contours:
                    area = cv2.contourArea(cnt)
                    center = cv2.moments(cnt)
                    cx, cy = int(center['m10'] / center['m00']), int(center['m01'] / center['m00'])
                    distanceX.append(cx)
                    if area > maxArea:
                        maxArea = area
                        MAindex = currentIndex
                    currentIndex = currentIndex + 1
                del contours[MAindex]  #remove the picture frame contour
                del distanceX[MAindex]
                eye = 'right'

                if len(contours) >= 2:  #delete the left most blob for right eye
                    if eye == 'right':
                        edgeOfEye = distanceX.index(min(distanceX))
                    else:
                        edgeOfEye = distanceX.index(max(distanceX))
                    del contours[edgeOfEye]
                    del distanceX[edgeOfEye]

                if len(contours) >= 1:  #get largest blob
                    maxArea = 0
                    for cnt in contours:
                        area = cv2.contourArea(cnt)
                        if area > maxArea:
                            maxArea = area
                            largeBlob = cnt
                    if len(largeBlob) > 0:
                        center = cv2.moments(largeBlob)
                        cx, cy = int(center['m10'] / center['m00']), int(center['m01'] / center['m00'])
                        cv2.circle(pupilO, (cx, cy), 5, 255, -1)

        #show picture
        cv2.imshow('frame', pupilO)
        cv2.imshow('frame2', pupilFrame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    #else:
        #break

# Release everything if job is finished
cap.release()
cv2.destroyAllWindows()
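A note on cv2.findContours: the plain script above unpacks two return values, while the notebook version below unpacks three. The return signature differs by release (OpenCV 3.x returns image, contours, hierarchy; 2.x and 4.x return contours, hierarchy). A small compatibility sketch, not part of the original gist, that also returns a mutable list so the del contours[...] calls keep working:

import cv2

def find_contours_compat(binary_img):
    #Return contours as a list regardless of the installed OpenCV version.
    result = cv2.findContours(binary_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = result[0] if len(result) == 2 else result[1]
    return list(contours)  #OpenCV 4.x returns a tuple, which del cannot modify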
Raw notebook (.ipynb JSON):
{
"cells": [
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-52-138875c277ff>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mframe\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcvtColor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframe\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCOLOR_RGB2GRAY\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mfaces\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCascadeClassifier\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'/usr/local/Cellar/opencv/3.3.1_1/share/OpenCV/haarcascades/haarcascade_eye.xml'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mdetected\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfaces\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetectMultiScale\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframe\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1.3\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0mpupilFrame\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0mpupilO\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"#Identify pupils. Based on beta 1\n",
"\n",
"import numpy as np\n",
"import cv2\n",
"import time\n",
"\n",
"cap = cv2.VideoCapture('/Users/thomasknapp/Google Drive/Eye-phone/Videos/steve_60FPS_hort_1920x1080.MOV') #640,480\n",
"w = 1920\n",
"h = 1080\n",
"\n",
"while(cap.isOpened()):\n",
" ret, frame = cap.read()\n",
" if ret==True:\n",
" frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)\n",
" faces = cv2.CascadeClassifier('/usr/local/Cellar/opencv/3.3.1_1/share/OpenCV/haarcascades/haarcascade_eye.xml')\n",
" detected = faces.detectMultiScale(frame, 1.3, 5)\n",
" pupilFrame = frame\n",
" pupilO = frame\n",
" windowClose = np.ones((5,5),np.uint8)\n",
" windowOpen = np.ones((2,2),np.uint8)\n",
" windowErode = np.ones((2,2),np.uint8)\n",
"\n",
" #draw square\n",
" for (x,y,w,h) in detected:\n",
" cv2.rectangle(frame, (x,y), ((x+w),(y+h)), (0,0,255),1)\n",
" cv2.line(frame, (x,y), ((x+w,y+h)), (0,0,255),1)\n",
" cv2.line(frame, (x+w,y), ((x,y+h)), (0,0,255),1)\n",
" pupilFrame = cv2.equalizeHist(frame[y+int(h*.25):(y+h), x:(x+w)])\n",
" pupilO = pupilFrame\n",
" ret, pupilFrame = cv2.threshold(pupilFrame,55,255,cv2.THRESH_BINARY) #50 ..nothin 70 is better\n",
" pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE, windowClose)\n",
" pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE, windowErode)\n",
" pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN, windowOpen)\n",
"\n",
" #so above we do image processing to get the pupil..\n",
" #now we find the biggest blob and get the centriod\n",
" \n",
" threshold = cv2.inRange(pupilFrame,250,255)#get the blobs\n",
" image, contours, hierarchy = cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n",
" \n",
"#if there are 3 or more blobs, delete the biggest and delete the left most for the right eye\n",
"#if there are 2 blob, take the second largest\n",
"#if there are 1 or less blobs, do nothing\n",
" if len(contours) >= 2:\n",
" #find biggest blob\n",
" maxArea = 0\n",
" MAindex = 0 #to get the unwanted frame \n",
" distanceX = [] #delete the left most (for right eye)\n",
" currentIndex = 0 \n",
" for cnt in contours:\n",
" if len(cnt) <= 2:\n",
" continue\n",
" area = cv2.contourArea(cnt)\n",
" center = cv2.moments(cnt)\n",
" cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])\n",
" distanceX.append(cx)\n",
" if area > maxArea:\n",
" maxArea = area\n",
" MAindex = currentIndex\n",
" currentIndex = currentIndex + 1\n",
" del contours[MAindex] #remove the picture frame contour\n",
" del distanceX[MAindex]\n",
" eye = 'right'\n",
"\n",
" if len(contours) >= 2: #delete the left most blob for right eye\n",
" if eye == 'right':\n",
" edgeOfEye = distanceX.index(min(distanceX))\n",
" else:\n",
" edgeOfEye = distanceX.index(max(distanceX))\n",
" del contours[edgeOfEye]\n",
" del distanceX[edgeOfEye]\n",
"\n",
" if len(contours) >= 1:#get largest blob\n",
" maxArea = 0\n",
" for cnt in contours:\n",
" area = cv2.contourArea(cnt)\n",
" if area > maxArea:\n",
" maxArea = area\n",
" largeBlob = cnt\n",
" if len(largeBlob) > 0:\n",
" center = cv2.moments(largeBlob)\n",
" cx,cy = int(center['m10']/center['m00']), int(center['m01']/center['m00'])\n",
" cv2.circle(pupilO,(cx,cy),5,255,-1)\n",
" #show picture\n",
" cv2.imshow('frame',pupilO)\n",
"# cv2.imshow('frame2',pupilFrame)\n",
" if cv2.waitKey(1) & 0xFF == ord('q'):\n",
" break\n",
" \n",
" #else:\n",
" #break\n",
"\n",
"# Release everything if job is finished\n",
"cap.release()\n",
"cv2.destroyAllWindows()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
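The block above is the raw JSON of the gist's Jupyter notebook, whose code cell runs the same pipeline against a prerecorded video instead of the webcam. To recover a plain .py script from it, the nbformat package can read the file and concatenate its code cells; a sketch, where the filename pupil.ipynb is a placeholder for whatever the notebook was saved as (the gist does not show the actual file name):

import nbformat

#Placeholder filename: substitute the notebook file downloaded from the gist.
nb = nbformat.read('pupil.ipynb', as_version=4)

#Write the source of every code cell into a single script.
with open('pupil_from_notebook.py', 'w') as out:
    for cell in nb.cells:
        if cell.cell_type == 'code':
            out.write(cell.source + '\n\n')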