Last active
May 23, 2017 11:05
-
-
Save yogiblue/76c3d62b837e729b5b72 to your computer and use it in GitHub Desktop.
Looking at motion detection on a specific area of a 640x480 video recording.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
################# | |
## Eclipse | |
################# | |
*.pydevproject | |
.project | |
.metadata | |
bin/ | |
tmp/ | |
*.tmp | |
*.bak | |
*.swp | |
*~.nib | |
local.properties | |
.classpath | |
.settings/ | |
.loadpath | |
# External tool builders | |
.externalToolBuilders/ | |
# Locally stored "Eclipse launch configurations" | |
*.launch | |
# CDT-specific | |
.cproject | |
# PDT-specific | |
.buildpath | |
################# | |
## Visual Studio | |
################# | |
## Ignore Visual Studio temporary files, build results, and | |
## files generated by popular Visual Studio add-ons. | |
# User-specific files | |
*.suo | |
*.user | |
*.sln.docstates | |
# Build results | |
[Dd]ebug/ | |
[Rr]elease/ | |
x64/ | |
build/ | |
[Bb]in/ | |
[Oo]bj/ | |
# MSTest test Results | |
[Tt]est[Rr]esult*/ | |
[Bb]uild[Ll]og.* | |
*_i.c | |
*_p.c | |
*.ilk | |
*.meta | |
*.obj | |
*.pch | |
*.pdb | |
*.pgc | |
*.pgd | |
*.rsp | |
*.sbr | |
*.tlb | |
*.tli | |
*.tlh | |
*.tmp | |
*.tmp_proj | |
*.log | |
*.vspscc | |
*.vssscc | |
.builds | |
*.pidb | |
*.log | |
*.scc | |
# Visual C++ cache files | |
ipch/ | |
*.aps | |
*.ncb | |
*.opensdf | |
*.sdf | |
*.cachefile | |
# Visual Studio profiler | |
*.psess | |
*.vsp | |
*.vspx | |
# Guidance Automation Toolkit | |
*.gpState | |
# ReSharper is a .NET coding add-in | |
_ReSharper*/ | |
*.[Rr]e[Ss]harper | |
# TeamCity is a build add-in | |
_TeamCity* | |
# DotCover is a Code Coverage Tool | |
*.dotCover | |
# NCrunch | |
*.ncrunch* | |
.*crunch*.local.xml | |
# Installshield output folder | |
[Ee]xpress/ | |
# DocProject is a documentation generator add-in | |
DocProject/buildhelp/ | |
DocProject/Help/*.HxT | |
DocProject/Help/*.HxC | |
DocProject/Help/*.hhc | |
DocProject/Help/*.hhk | |
DocProject/Help/*.hhp | |
DocProject/Help/Html2 | |
DocProject/Help/html | |
# Click-Once directory | |
publish/ | |
# Publish Web Output | |
*.Publish.xml | |
*.pubxml | |
*.publishproj | |
# NuGet Packages Directory | |
## TODO: If you have NuGet Package Restore enabled, uncomment the next line | |
#packages/ | |
# Windows Azure Build Output | |
csx | |
*.build.csdef | |
# Windows Store app package directory | |
AppPackages/ | |
# Others | |
sql/ | |
*.Cache | |
ClientBin/ | |
[Ss]tyle[Cc]op.* | |
~$* | |
*~ | |
*.dbmdl | |
*.[Pp]ublish.xml | |
*.pfx | |
*.publishsettings | |
# RIA/Silverlight projects | |
Generated_Code/ | |
# Backup & report files from converting an old project file to a newer | |
# Visual Studio version. Backup files are not needed, because we have git ;-) | |
_UpgradeReport_Files/ | |
Backup*/ | |
UpgradeLog*.XML | |
UpgradeLog*.htm | |
# SQL Server files | |
App_Data/*.mdf | |
App_Data/*.ldf | |
############# | |
## Windows detritus | |
############# | |
# Windows image file caches | |
Thumbs.db | |
ehthumbs.db | |
# Folder config file | |
Desktop.ini | |
# Recycle Bin used on file shares | |
$RECYCLE.BIN/ | |
# Mac crap | |
.DS_Store | |
############# | |
## Python | |
############# | |
*.py[cod] | |
# Packages | |
*.egg | |
*.egg-info | |
dist/ | |
build/ | |
eggs/ | |
parts/ | |
var/ | |
sdist/ | |
develop-eggs/ | |
.installed.cfg | |
# Installer logs | |
pip-log.txt | |
# Unit test / coverage reports | |
.coverage | |
.tox | |
#Translations | |
*.mo | |
#Mr Developer | |
.mr.developer.cfg |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# looking at motion detection on a specific area of a 640x480 video recording | |
# we record any motion detected and output images every n=60 frames or so (you can change this) | |
# we also record a csv file of total movement | |
# NOTE, it's all a bit hard coded because only I'm using it currently | |
import os | |
import datetime | |
import numpy as np | |
import cv2 | |
from Tkinter import Tk | |
from tkFileDialog import askopenfilename | |
from scipy import ndimage | |
def main():
    """Run motion detection over one user-selected 640x480 .wmv recording.

    Writes, next to the selected video:
      * <timestamp>_res/output_blobs.avi -- XVID video output
      * <timestamp>_res/result_total.jpg -- accumulated activity image
      * videotime.csv -- movement stats (appended)
    NOTE: geometry (640x480, masking triangles) is hard coded.
    """
    print("Doing motion detection on a single video....")
    Tk().withdraw()  # no full GUI; keep the root window from appearing
    # user selects a file; the script then works inside that directory
    filename = askopenfilename()
    print(filename)
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    os.chdir(os.path.dirname(filename))
    simpleFile = os.path.basename(filename)
    # find the '201' of the year and step 2 chars to the short-year part
    # (e.g. the '14' of '2014') -- assumes the filename embeds a timestamp
    startpos = simpleFile.find('201') + 2
    # the timestamp part is 17 characters long: yy-mm-dd_HH-MM-SS
    endpos = startpos + 17
    datetimeString = simpleFile[startpos:endpos]
    outputdir = datetimeString + "_res"
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    print(datetimeString)  # just a check
    # scan the directory (list currently unused beyond this)
    import glob
    allfiles = glob.glob("*.wmv")
    fmt = '%y-%m-%d_%H-%M-%S'
    # just in case we want to delay processing for when we are asleep:
    # sleep(43200)
    fd = open('videotime.csv', 'a')
    fd.write('Date, total movement, total boxed area, number of blobs\n')
    f = simpleFile
    print("Doing " + f)
    datetimeString = f[startpos:endpos]
    date_object = datetime.datetime.strptime(datetimeString, fmt)
    print(date_object)
    # open the video and an XVID writer for the output
    cap = cv2.VideoCapture(f)
    if cv2.__version__ == '2.4.6':
        fourcc = cv2.cv.CV_FOURCC(*'XVID')
    else:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(outputdir + '/output_blobs.avi', fourcc, 20.0, (640, 480))
    count = 0
    timeVideo = 0
    # waitKey delay in ms: 1 = very fast, 50 = quite fast, 100 = medium
    speed = 100
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
    # background subtractor -- MOG on 2.4.6, KNN on anything newer
    if cv2.__version__ == '2.4.6':
        print("Using opencv2 version 2.4.6")
        fgbg = cv2.BackgroundSubtractorMOG()
    elif cv2.__version__ == '3.0.0':
        print("Using opencv2 version 3.0.0")
        fgbg = cv2.createBackgroundSubtractorKNN()
    else:
        print("Using opencv2 untested version ")
        fgbg = cv2.createBackgroundSubtractorKNN()
    # tracking accumulators (8-bit, frame sized)
    track_res = np.zeros((480, 640), dtype=np.uint8)
    track_res_total = np.zeros((480, 640), dtype=np.uint8)
    track_res_trim = track_res
    dilated_image = track_res_trim
    blurred_image = track_res_trim
    average_image = np.zeros((480, 640, 3), dtype=np.uint8)
    bt = average_image
    avg_image_write_count = 0
    # masking triangles: motion inside these regions is ignored
    triangle = np.array([[620, 0], [0, 480], [0, 0]], np.int32)
    triangle2 = np.array([[0, 0], [620, 0], [640, 480]], np.int32)
    # bottom right
    triangle3 = np.array([[300, 480], [520, 380], [640, 480]], np.int32)
    # bottom left
    triangle4 = np.array([[0, 480], [120, 380], [350, 480]], np.int32)
    # initialise per-run state
    count = 0
    total_summ = 0
    img_count = 0
    img_write = 0
    xbox1 = 0
    xbox2 = 0
    ybox1 = 0
    ybox2 = 0
    xstart_new = 0
    ystart_new = 0
    xstop_new = 0
    ystop_new = 0
    unified = []
    # BUG FIX: the original tested `if exit == True`, which compares the
    # `exit` builtin to True and is never true; track quitting explicitly.
    user_quit = False
    while cap.isOpened():
        ret, frame1 = cap.read()
        if ret == False:
            print("End of video")
            break
        fgmask1 = fgbg.apply(frame1)
        gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        # read the next frame too -- motion is measured between frame pairs
        ret, frame2 = cap.read()
        if ret == False:
            print("End of video")
            break
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        fgmask2 = fgbg.apply(frame2)
        # frame-to-frame mask difference removes permanent white blobs
        gray_res = fgmask2 - fgmask1
        # blank out the masked regions
        cv2.fillConvexPoly(gray_res, triangle, 0)
        cv2.fillConvexPoly(gray_res, triangle2, 0)
        cv2.fillConvexPoly(gray_res, triangle3, 0)
        cv2.fillConvexPoly(gray_res, triangle4, 0)
        # threshold the difference and mask the grey frame with it
        low_values_indices = gray_res < 25
        gray_res[low_values_indices] = 0
        gray2[low_values_indices] = 0
        # denoised version available for a smoother result if wanted
        denoised = ndimage.median_filter(gray2, 4)
        high_values_indices = gray2 > 0
        # latch per-interval motion; accumulate long-term motion, capped at 230
        track_res[high_values_indices] = 240
        track_res_total[high_values_indices] = track_res_total[high_values_indices] + 15
        low_values_indices = track_res_total >= 235
        track_res_total[low_values_indices] = 230
        frame1[high_values_indices] = 240
        cv2.putText(frame1, str(date_object), (50, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255))
        # advance the clock; timedelta(days, seconds, microseconds) -- the
        # step depends on fps and the recording rate, so this is empirical
        date_object = date_object + datetime.timedelta(0, 0, 4120000 / fps)
        k = cv2.waitKey(speed)
        # rolling measure of activity, not generally used
        total_summ = (gray_res.sum() + total_summ) / 2
        if k & 0xFF == ord('q'):
            user_quit = True
            break
        count = count + 1
        img_write = img_write + 1
        cv2.imshow('trackres', track_res_total)
    # end of loop: write out the accumulated results
    cv2.imwrite(outputdir + "/result_total.jpg", track_res_total)
    low_values_indices = track_res_total < 1
    track_res_total[low_values_indices] = 0
    activity_count = np.count_nonzero(track_res_total)
    # counts of pixels active above increasing thresholds (CSV writes for
    # these are disabled in this variant of the script)
    low_values_indices = track_res_total <= 100
    track_res_total[low_values_indices] = 0
    activity_countx = np.count_nonzero(track_res_total)
    low_values_indices = track_res_total <= 200
    track_res_total[low_values_indices] = 0
    activity_count2 = np.count_nonzero(track_res_total)
    low_values_indices = track_res_total < 240
    track_res_total[low_values_indices] = 0
    activity_count3 = np.count_nonzero(track_res_total)
    if user_quit:
        print("Quitting analysis")
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    print("Finished")
    fd.close()
def groupContours(contours, threshold):
    """Merge contours whose closest points lie within `threshold` pixels.

    Returns a list of convex hulls, one per merged group.  Returns an
    empty list for empty input (the original fell through and returned
    None, which crashed callers doing len(...) on the result).
    """
    if len(contours) == 0:
        return []
    LENGTH = len(contours)
    # status[k] holds the group label assigned to contour k
    status = np.zeros((LENGTH, 1))
    for i, contour1 in enumerate(contours):
        x = i
        if i != LENGTH - 1:
            for j, contour2 in enumerate(contours[i + 1:]):
                x = x + 1
                dist = find_if_close(contour1, contour2, threshold)
                if dist == True:
                    # close pair: both take the smaller group label
                    val = min(status[i], status[x])
                    status[x] = status[i] = val
                else:
                    if status[x] == status[i]:
                        status[x] = i + 1
    combined = []
    maximum = int(status.max()) + 1
    for label in range(maximum):
        pos = np.where(status == label)[0]
        if pos.size != 0:
            # np.vstack needs a real sequence; the original passed a
            # generator (rejected by newer numpy) that also shadowed `i`
            cont = np.vstack([contours[k] for k in pos])
            hull = cv2.convexHull(cont)
            combined.append(hull)
    return combined
def find_if_close(contour1, contour2, distance):
    """Return True if any point of contour1 is within `distance` of any
    point of contour2, else False.

    contour1/contour2: OpenCV-style point arrays, assumed (N, 1, 2) --
    TODO confirm against findContours output.  distance: pixels.
    """
    row1, row2 = contour1.shape[0], contour2.shape[0]
    for i in range(row1):
        for j in range(row2):
            # Euclidean distance between the two points
            dist = np.linalg.norm(contour1[i] - contour2[j])
            if abs(dist) < distance:
                return True
    # no pair was close enough; this also covers empty contours, where
    # the original fell off the end and implicitly returned None
    return False
def trimArray(arrayIn, percent):
    """Bounding box of arrayIn after trimming `percent` percent of the
    total 'mass' (sum of values) off each side.

    Returns (xstart, xfinish, ystart, yfinish) as plain ints, usable
    directly as slice bounds (the original returned 1-element arrays).
    arrayIn must have a non-zero sum, otherwise argwhere() is empty and
    min() raises -- same failure mode as the original.
    """
    print(percent)
    total_sum = arrayIn.sum()
    cutoff = percent * total_sum / 100.0

    def _first_past(profile):
        # index of the first position whose cumulative sum exceeds the cutoff
        return int(np.argwhere(profile.cumsum() > cutoff).min())

    # x axis: column sums, scanned forwards then backwards
    col_sums = arrayIn.sum(0)
    xstart = _first_past(col_sums)
    print("New X start is " + str(xstart))
    xfinish = len(col_sums) - _first_past(col_sums[::-1])
    print("New X end is " + str(xfinish))
    # y axis: row sums, same treatment
    row_sums = arrayIn.sum(1)
    ystart = _first_past(row_sums)
    print("New Y start is " + str(ystart))
    yfinish = len(row_sums) - _first_past(row_sums[::-1])
    print("New Y end is " + str(yfinish))
    return (xstart, xfinish, ystart, yfinish)
# Script entry point.
if __name__ == "__main__":
    main()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# looking at motion detection on a specific area of a 640x480 video recording | |
# we record any motion detected and output images every n=60 frames or so (you can change this) | |
# we also record a csv file of total movement | |
# NOTE, it's all a bit hard coded because only I'm using it currently | |
import os | |
import datetime | |
import numpy as np | |
import cv2 | |
from Tkinter import Tk | |
from tkFileDialog import askopenfilename | |
from scipy import ndimage | |
def main():
    """Run motion detection over one user-selected 640x480 .wmv recording.

    Writes, next to the selected video:
      * <timestamp>_res/output_blobs.avi -- video of averaged motion blobs
      * <timestamp>_res/result_total.jpg -- accumulated activity image
      * videotime.csv -- per-interval movement stats (appended)
    NOTE: geometry (640x480, masking triangles) is hard coded.
    """
    print("Doing motion detection on a single video....")
    Tk().withdraw()  # no full GUI; keep the root window from appearing
    # user selects a file; the script then works inside that directory
    filename = askopenfilename()
    print(filename)
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    os.chdir(os.path.dirname(filename))
    simpleFile = os.path.basename(filename)
    # find the '201' of the year and step 2 chars to the short-year part
    # (e.g. the '14' of '2014') -- assumes the filename embeds a timestamp
    startpos = simpleFile.find('201') + 2
    # the timestamp part is 17 characters long: yy-mm-dd_HH-MM-SS
    endpos = startpos + 17
    datetimeString = simpleFile[startpos:endpos]
    outputdir = datetimeString + "_res"
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    print(datetimeString)  # just a check
    # scan the directory (list currently unused beyond this)
    import glob
    allfiles = glob.glob("*.wmv")
    fmt = '%y-%m-%d_%H-%M-%S'
    # just in case we want to delay processing for when we are asleep:
    # sleep(43200)
    fd = open('videotime.csv', 'a')
    fd.write('Date, total movement, total boxed area, number of blobs\n')
    f = simpleFile
    print("Doing " + f)
    datetimeString = f[startpos:endpos]
    date_object = datetime.datetime.strptime(datetimeString, fmt)
    print(date_object)
    # open the video and an XVID writer for the blob output
    cap = cv2.VideoCapture(f)
    if cv2.__version__ == '2.4.6':
        fourcc = cv2.cv.CV_FOURCC(*'XVID')
    else:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(outputdir + '/output_blobs.avi', fourcc, 20.0, (640, 480))
    count = 0
    timeVideo = 0
    # waitKey delay in ms: 1 = very fast, 50 = quite fast, 100 = medium
    speed = 100
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
    # background subtractor -- MOG on 2.4.6, KNN on anything newer
    if cv2.__version__ == '2.4.6':
        print("Using opencv2 version 2.4.6")
        fgbg = cv2.BackgroundSubtractorMOG()
    elif cv2.__version__ == '3.0.0':
        print("Using opencv2 version 3.0.0")
        fgbg = cv2.createBackgroundSubtractorKNN()
    else:
        print("Using opencv2 untested version ")
        fgbg = cv2.createBackgroundSubtractorKNN()
    # tracking accumulators (8-bit, frame sized)
    track_res = np.zeros((480, 640), dtype=np.uint8)
    track_res_total = np.zeros((480, 640), dtype=np.uint8)
    track_res_trim = track_res
    dilated_image = track_res_trim
    blurred_image = track_res_trim
    average_image = np.zeros((480, 640, 3), dtype=np.uint8)
    bt = average_image
    avg_image_write_count = 0
    # masking triangles: motion inside these regions is ignored
    triangle = np.array([[620, 0], [0, 480], [0, 0]], np.int32)
    triangle2 = np.array([[0, 0], [620, 0], [640, 480]], np.int32)
    # bottom right
    triangle3 = np.array([[300, 480], [520, 380], [640, 480]], np.int32)
    # bottom left
    triangle4 = np.array([[0, 480], [120, 380], [350, 480]], np.int32)
    # initialise per-run state
    count = 0
    total_summ = 0
    img_count = 0
    img_write = 0
    xbox1 = 0
    xbox2 = 0
    ybox1 = 0
    ybox2 = 0
    xstart_new = 0
    ystart_new = 0
    xstop_new = 0
    ystop_new = 0
    unified = []
    # BUG FIX: the original tested `if exit == True`, which compares the
    # `exit` builtin to True and is never true; track quitting explicitly.
    user_quit = False
    while cap.isOpened():
        ret, frame1 = cap.read()
        if ret == False:
            print("End of video")
            break
        fgmask1 = fgbg.apply(frame1)
        gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        # read the next frame too -- motion is measured between frame pairs
        ret, frame2 = cap.read()
        if ret == False:
            print("End of video")
            break
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        fgmask2 = fgbg.apply(frame2)
        # frame-to-frame mask difference removes permanent white blobs
        gray_res = fgmask2 - fgmask1
        # blank out the masked regions
        cv2.fillConvexPoly(gray_res, triangle, 0)
        cv2.fillConvexPoly(gray_res, triangle2, 0)
        cv2.fillConvexPoly(gray_res, triangle3, 0)
        cv2.fillConvexPoly(gray_res, triangle4, 0)
        # threshold the difference and mask the grey frame with it
        low_values_indices = gray_res < 25
        gray_res[low_values_indices] = 0
        gray2[low_values_indices] = 0
        # denoised version available for a smoother result if wanted
        denoised = ndimage.median_filter(gray2, 4)
        high_values_indices = gray2 > 0
        # latch per-interval motion; accumulate long-term motion, capped at 230
        track_res[high_values_indices] = 240
        track_res_total[high_values_indices] = track_res_total[high_values_indices] + 15
        low_values_indices = track_res_total >= 235
        track_res_total[low_values_indices] = 230
        frame1[high_values_indices] = 240
        cv2.putText(frame1, str(date_object), (50, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255))
        # advance the clock; timedelta(days, seconds, microseconds) -- the
        # step depends on fps and the recording rate, so this is empirical
        date_object = date_object + datetime.timedelta(0, 0, 4120000 / fps)
        k = cv2.waitKey(speed)
        # rolling measure of activity, not generally used
        total_summ = (gray_res.sum() + total_summ) / 2
        if k & 0xFF == ord('q'):
            user_quit = True
            break
        count = count + 1
        img_write = img_write + 1
        if img_write == 4:
            # every 4th frame pair: box the motion, write stats and blobs
            non_zero_res = np.argwhere(track_res)
            if len(non_zero_res) > 0:
                # bounding box of all non-zero (moving) pixels
                (ystart, xstart), (ystop, xstop) = non_zero_res.min(0), non_zero_res.max(0) + 1
                print("X spread: " + str(xstop - xstart))
                print("Y spread: " + str(ystop - ystart))
                print("X start " + str(xstart))
                print("X end " + str(xstop))
                print("Y start " + str(ystart))
                print("Y end " + str(ystop))
                # trim off 10 percent of the 'mass' from each side
                (xstart_new, xstop_new, ystart_new, ystop_new) = trimArray(track_res, 10)
                track_res_trim = np.zeros((480, 640), dtype=np.uint8)
                track_res_trim[ystart_new:ystop_new, xstart_new:xstop_new] = track_res[ystart_new:ystop_new, xstart_new:xstop_new]
                kernel = np.ones((2, 2), 'uint8')
                dilated_image = cv2.dilate(track_res_trim, kernel)
                blurred_image = cv2.GaussianBlur(dilated_image, (5, 5), 0)
                track_res_trim = dilated_image
                # findContours modifies the image; return signature differs
                # between OpenCV 2.x and 3.x
                if cv2.__version__ == '2.4.6':
                    contours, hierarchy = cv2.findContours(track_res_trim, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                else:
                    _, contours, hierarchy = cv2.findContours(track_res_trim, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                unified = groupContours(contours, 30)
                xbox1 = xstart_new
                ybox1 = ystart_new
                xbox2 = xstop_new
                ybox2 = ystop_new
                # DEDUP: the original's if/else branches both drew the boxes
                # and wrote 4 frames; only the running-average update differs.
                if avg_image_write_count < 4:
                    avg_image_write_count = avg_image_write_count + 1
                    bt = np.zeros((480, 640, 3), dtype=np.uint8)
                    low_values_indices = blurred_image > 0
                    bt[low_values_indices] = 250
                    # running average of the blob image
                    average_image = average_image / 2 + bt / 2
                else:
                    avg_image_write_count = 0
                average_image_out = average_image
                if len(unified) > 0:
                    for c in unified:
                        rect = cv2.boundingRect(c)
                        x, y, w, h = rect
                        cv2.rectangle(average_image_out, (x, y), (x + w, y + h), (255, 255, 255), 1)
                # write 4 identical frames so the 20 fps output plays usefully
                out.write(average_image_out)
                out.write(average_image_out)
                out.write(average_image_out)
                out.write(average_image_out)
                # (long)(...) in the original is just long(...); int() works
                # on both Python 2 and 3 for these magnitudes
                area = int((xstop_new - xstart_new) * (ystop_new - ystart_new))
                fd.write(str(date_object))
                fd.write(',')
                activity_countx = np.count_nonzero(track_res)
                fd.write(str(activity_countx))
                fd.write(',')
                fd.write(str(area))
                fd.write(',')
                fd.write(str(len(unified)))
                fd.write('\n')
                # reset the per-interval accumulator
                track_res = np.zeros((480, 640), dtype=np.uint8)
                img_count = img_count + 1
            # reset outside the non-zero check so the 4-frame cadence holds
            # even when nothing moved
            img_write = 0
        if len(unified) > 0:
            cv2.rectangle(frame1, (xstart_new, ystart_new), (xstop_new, ystop_new), (255, 255, 255), 1)
            for c in unified:
                rect = cv2.boundingRect(c)
                x, y, w, h = rect
                cv2.rectangle(frame1, (x, y), (x + w, y + h), (255, 255, 255), 3)
            cv2.drawContours(frame1, unified, -1, (255, 255, 255), 3)
        cv2.imshow('frame1', frame1)
    # end of loop: write out the accumulated results
    cv2.imwrite(outputdir + "/result_total.jpg", track_res_total)
    low_values_indices = track_res_total < 1
    track_res_total[low_values_indices] = 0
    activity_count = np.count_nonzero(track_res_total)
    # counts of pixels active above increasing thresholds (CSV writes for
    # these are disabled in this variant of the script)
    low_values_indices = track_res_total <= 100
    track_res_total[low_values_indices] = 0
    activity_countx = np.count_nonzero(track_res_total)
    low_values_indices = track_res_total <= 200
    track_res_total[low_values_indices] = 0
    activity_count2 = np.count_nonzero(track_res_total)
    low_values_indices = track_res_total < 240
    track_res_total[low_values_indices] = 0
    activity_count3 = np.count_nonzero(track_res_total)
    if user_quit:
        print("Quitting analysis")
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    print("Finished")
    fd.close()
def groupContours(contours, threshold):
    """Merge contours whose closest points lie within `threshold` pixels.

    Returns a list of convex hulls, one per merged group.  Returns an
    empty list for empty input (the original fell through and returned
    None, which crashed callers doing len(...) on the result).
    """
    if len(contours) == 0:
        return []
    LENGTH = len(contours)
    # status[k] holds the group label assigned to contour k
    status = np.zeros((LENGTH, 1))
    for i, contour1 in enumerate(contours):
        x = i
        if i != LENGTH - 1:
            for j, contour2 in enumerate(contours[i + 1:]):
                x = x + 1
                dist = find_if_close(contour1, contour2, threshold)
                if dist == True:
                    # close pair: both take the smaller group label
                    val = min(status[i], status[x])
                    status[x] = status[i] = val
                else:
                    if status[x] == status[i]:
                        status[x] = i + 1
    combined = []
    maximum = int(status.max()) + 1
    for label in range(maximum):
        pos = np.where(status == label)[0]
        if pos.size != 0:
            # np.vstack needs a real sequence; the original passed a
            # generator (rejected by newer numpy) that also shadowed `i`
            cont = np.vstack([contours[k] for k in pos])
            hull = cv2.convexHull(cont)
            combined.append(hull)
    return combined
def find_if_close(contour1, contour2, distance):
    """Return True if any point of contour1 is within `distance` of any
    point of contour2, else False.

    contour1/contour2: OpenCV-style point arrays, assumed (N, 1, 2) --
    TODO confirm against findContours output.  distance: pixels.
    """
    row1, row2 = contour1.shape[0], contour2.shape[0]
    for i in range(row1):
        for j in range(row2):
            # Euclidean distance between the two points
            dist = np.linalg.norm(contour1[i] - contour2[j])
            if abs(dist) < distance:
                return True
    # no pair was close enough; this also covers empty contours, where
    # the original fell off the end and implicitly returned None
    return False
def trimArray(arrayIn, percent):
    """Bounding box of arrayIn after trimming `percent` percent of the
    total 'mass' (sum of values) off each side.

    Returns (xstart, xfinish, ystart, yfinish) as plain ints, usable
    directly as slice bounds (the original returned 1-element arrays).
    arrayIn must have a non-zero sum, otherwise argwhere() is empty and
    min() raises -- same failure mode as the original.
    """
    print(percent)
    total_sum = arrayIn.sum()
    cutoff = percent * total_sum / 100.0

    def _first_past(profile):
        # index of the first position whose cumulative sum exceeds the cutoff
        return int(np.argwhere(profile.cumsum() > cutoff).min())

    # x axis: column sums, scanned forwards then backwards
    col_sums = arrayIn.sum(0)
    xstart = _first_past(col_sums)
    print("New X start is " + str(xstart))
    xfinish = len(col_sums) - _first_past(col_sums[::-1])
    print("New X end is " + str(xfinish))
    # y axis: row sums, same treatment
    row_sums = arrayIn.sum(1)
    ystart = _first_past(row_sums)
    print("New Y start is " + str(ystart))
    yfinish = len(row_sums) - _first_past(row_sums[::-1])
    print("New Y end is " + str(yfinish))
    return (xstart, xfinish, ystart, yfinish)
# Script entry point.
if __name__ == "__main__":
    main()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# looking at motion detection on a specific area of a 640x480 video recording | |
# we record any motion detected and output images every n=60 frames or so (you can change this) | |
# we also record a csv file of total movement | |
# NOTE, it's all a bit hard coded because only I'm using it currently | |
import os | |
import datetime | |
import numpy as np | |
import cv2 | |
from Tkinter import Tk | |
from tkFileDialog import askopenfilename | |
from scipy import ndimage | |
def main():
    """Run motion detection over one user-selected 640x480 .wmv video.

    The user picks a file via a Tk dialog; a background subtractor and
    frame differencing detect movement inside a masked region, an
    accumulated activity image is written to a per-recording output
    directory, and summary movement measures are computed.

    NOTE(review): the indentation below is reconstructed — the pasted
    source lost its whitespace, so the extent of the while loop is the
    reviewer's best reading; confirm against the original file.
    """
    print "Doing motion detection on a single video...."
    Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
    # user selects a file in a directory of videos
    # the python script then knows where to process files
    filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
    print(filename)
    # OpenCV version parts decide which constant/factory names exist below
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    # print os.path.basename(filename)
    # print os.path.dirname(filename)
    # go to the directory
    os.chdir(os.path.dirname(filename))
    simpleFile = os.path.basename(filename)
    # find the 201* part of the 2014 and then move along 2 to get to the year part (e.g. 14)
    startpos = simpleFile.find('201') + 2
    # the time part we are interested in is 17 characters long
    endpos = startpos + 17
    datetimeString = simpleFile[startpos:endpos]
    # results (images / blob video) go in a per-recording directory
    outputdir = datetimeString + "_res"
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    #just a check
    print datetimeString
    # get a list of all the files
    import glob
    allfiles = glob.glob("*.wmv")  # NOTE(review): collected but never used below
    # timestamp layout embedded in the filename, e.g. 14-05-23_11-05-00
    fmt = '%y-%m-%d_%H-%M-%S'
    # just in case we want to delay processing for when we are asleep
    #sleep(43200)
    # CSV of movement measures, appended to across runs
    fd = open('videotime.csv','a')
    #fd.write('Date, Total sum, All pixels, All>100, All>200, All>240\n')
    fd.write('Date, total movement, total boxed area, number of blobs\n')
    f = simpleFile
    print "Doing " + f
    # recover the recording's start time from the filename
    datetimeString = f[startpos:endpos]
    date_object = datetime.datetime.strptime(datetimeString, fmt)
    print date_object
    # open the video processing stuff
    cap = cv2.VideoCapture(f)
    # the FOURCC factory moved between OpenCV 2.4.x and 3.x
    if cv2.__version__=='2.4.6':
        fourcc = cv2.cv.CV_FOURCC(*'XVID')
    else:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(outputdir + '/output_blobs.avi',fourcc, 20.0, (640,480))
    count = 0
    timeVideo = 0
    #speed = 1 # very fast
    #speed = 50 # quite fast
    #speed = 100 # medium
    speed = 100  # milliseconds passed to cv2.waitKey per frame pair (playback pacing)
    #print cv2.__version__
    # the FPS property name also differs between OpenCV 2.x and 3.x
    if int(major_ver) < 3 :
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
    else :
        fps = cap.get(cv2.CAP_PROP_FPS)
        print "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)
    #extract the background
    if cv2.__version__=='2.4.6':
        print "Using opencv2 version 2.4.6"
        fgbg = cv2.BackgroundSubtractorMOG()
    elif cv2.__version__=='3.0.0':
        print "Using opencv2 version 3.0.0"
        fgbg = cv2.createBackgroundSubtractorKNN()
    else:
        print "Using opencv2 untested version "
        fgbg = cv2.createBackgroundSubtractorKNN()
    # set up an array for tracking
    track_res = np.zeros((480, 640), dtype=np.uint8)
    track_res.astype(int)  # NOTE(review): no-op — astype returns a copy that is discarded
    track_res_total = np.zeros((480, 640), dtype=np.uint8)
    track_res_total.astype(int)  # NOTE(review): no-op as above
    track_res_trim = track_res  # alias of track_res, not a copy
    #create some empty image arrays
    dilated_image = track_res_trim
    blurred_image = track_res_trim
    #average_image = track_res_trim
    average_image = np.zeros((480, 640, 3), dtype=np.uint8)
    average_image.astype(int)  # NOTE(review): no-op as above
    bt = average_image
    avg_image_write_count = 0
    # set up the screen
    # masking triangles: the first two blank the upper/diagonal region of
    # the 640x480 frame, triangle3/triangle4 blank the bottom corners
    triangle = np.array([[620,0], [0,480], [0,0] ], np.int32)
    triangle2 = np.array([[0,0], [620,0], [640,480] ], np.int32)
    #bottom right
    triangle3 = np.array([[300,480], [520,380], [640,480] ], np.int32)
    #triangle3 = np.array([[400,480], [520,380], [640,480] ], np.int32)
    #bottom left
    triangle4 = np.array([[0,480], [120,380], [350,480] ], np.int32)
    #triangle4 = np.array([[0,480], [120,380], [240,480] ], np.int32)
    # initialise variables
    count = 0
    total_summ = 0
    img_count = 0
    img_write = 0
    xbox1=0
    xbox2=0
    ybox1=0
    ybox2 = 0
    xstart_new=0
    ystart_new=0
    xstop_new=0
    ystop_new=0
    unified = []
    # main loop: consume frames two at a time until the video runs out
    while(cap.isOpened()):
        # read the frame
        ret, frame1 = cap.read()
        #if count==0:
        #average_image = frame1
        #low_values_indices = average_image >= 0
        #average_image[low_values_indices] = 0
        #bt = average_image
        if ret==False:
            print "End of video"
            break;
        # apply the mask
        fgmask1 = fgbg.apply(frame1)
        # convert to grayscale
        gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        # read the next frame
        ret, frame2 = cap.read()
        if ret==False:
            print "End of video"
            break;
        # convert to grayscale
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        # apply the mask
        fgmask2 = fgbg.apply(frame2)
        # look at the difference between the two frames
        # this removes any permanent white blobs that may appear
        gray_res = fgmask2 - fgmask1
        #contours, hierarchy = cv2.findContours(fgmask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        #if len(contours)>0:
        #c = max(contours, key=cv2.contourArea)
        #((x, y), radius) = cv2.minEnclosingCircle(c)
        #M = cv2.moments(c)
        #center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        ####if radius > 1:
        ####cv2.circle(gray_res, (int(x), int(y)), int(radius),(255, 0, 0), -1)
        #cv2.circle(frame1, center, 5, (200, 200, 200), -1)
        #cv2.drawContours(frame1, contours, -1, (255,255,255), 3)
        # blank out the regions of the frame we are not watching
        cv2.fillConvexPoly(gray_res, triangle, 0)
        cv2.fillConvexPoly(gray_res, triangle2, 0)
        cv2.fillConvexPoly(gray_res, triangle3, 0)
        cv2.fillConvexPoly(gray_res, triangle4, 0)
        # draw it on frame1
        #cv2.fillConvexPoly(frame1, triangle, 0)
        #cv2.fillConvexPoly(frame1, triangle2, 0)
        #cv2.fillConvexPoly(frame1, triangle3, 0)
        #cv2.fillConvexPoly(frame1, triangle4, 0)
        #threshold the result
        low_values_indices = gray_res < 25
        gray_res[low_values_indices] = 0
        # mask the second frame
        gray2[low_values_indices] = 0
        # remove any noise
        denoised = ndimage.median_filter(gray2, 4)  # NOTE(review): computed but unused unless the line below is switched
        # get the high value positions
        high_values_indices = gray2 > 0
        #use denoised fror a smoother result
        #high_values_indices = denoised > 0
        # increment the result array
        #track_res[high_values_indices] = track_res[high_values_indices] + 10
        # limit the max amount
        #low_values_indices = track_res >= 245
        track_res[high_values_indices] = 240
        # accumulate movement; saturate at 230 so uint8 never wraps
        track_res_total[high_values_indices] = track_res_total[high_values_indices] + 15
        low_values_indices = track_res_total >= 235
        track_res_total[low_values_indices] = 230
        #high_values_indices = track_res > 0
        #high_values_indices = gray_res > 0
        # paint the moving pixels white on the display frame
        frame1[high_values_indices] = 240
        # show the result array as a frame - nice to look at
        cv2.putText(frame1, str(date_object), (50,50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255,255,255))
        #incrementing the time depends on the frames per second and how the frequency
        #of the recording of those frames - so it's not so straightforward
        #date_object = date_object + datetime.timedelta(0,0,333333)
        # advance the overlay clock by an empirical 4120000/fps microseconds per frame pair
        date_object = date_object + datetime.timedelta(0,0,4120000/fps)
        #cv2.imshow('frame1',frame1)
        #cv2.rectangle(frame1, (xbox1,ybox1), (xbox2,ybox2), (255,255,255), 3)
        # move on
        k = cv2.waitKey(speed)
        # a measure of activity, not generally used
        total_summ = (gray_res.sum() + total_summ)/2
        if k & 0xFF == ord('q'):
            break
        count = count + 1
        img_write = img_write + 1
        #cv2.imshow('frame1',frame1)
        cv2.imshow('trackres', track_res_total)
    # write out the results
    cv2.imwrite(outputdir + "/result_total.jpg", track_res_total)
    low_values_indices = track_res_total < 1
    track_res_total[low_values_indices] = 0
    activity_count = np.count_nonzero(track_res_total)
    # writing out the data
    # different measures of activity
    #fd.write(str(date_object))
    #fd.write(',')
    #fd.write(str(total_summ))
    #fd.write(',')
    # this is the most useful one
    #fd.write(str(activity_count))
    # look at different activity levels
    # each step zeroes everything at or below a threshold, so the counts
    # are of pixels with progressively stronger accumulated movement
    low_values_indices = track_res_total <= 100
    track_res_total[low_values_indices] = 0
    activity_countx = np.count_nonzero(track_res_total)
    #fd.write(',')
    #fd.write(str(activity_countx))
    # look at different activity levels
    low_values_indices = track_res_total <= 200
    track_res_total[low_values_indices] = 0
    activity_count2 = np.count_nonzero(track_res_total)
    #fd.write(',')
    #fd.write(str(activity_count2))
    # look at different activity levels
    low_values_indices = track_res_total < 240
    track_res_total[low_values_indices] = 0
    activity_count3 = np.count_nonzero(track_res_total)
    #fd.write(',')
    #fd.write(str(activity_count3))
    #fd.write('\n')
    #np.savetxt("foo2.csv", track_res_total, delimiter=",")
    # the end
    # NOTE(review): `exit` here is the Python builtin (the site quitter
    # object), never assigned in this function, so this comparison is
    # always False and the branch is dead — probably meant a quit flag.
    if exit == True:
        print "Quitting analysis"
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    print "Finished"
    # end
    fd.close()
def groupContours(contours, threshold):
    """Merge contours whose closest points lie within `threshold` pixels.

    A simple union-style labelling pass assigns the same label in
    `status` to contours that `find_if_close` reports as close; each
    resulting cluster is replaced by the convex hull of all its points.

    Parameters:
        contours: sequence of OpenCV contours (arrays of points).
        threshold: distance in pixels below which two contours merge.

    Returns:
        A list of convex-hull contours, one per cluster, or None when
        `contours` is empty (preserving the original fall-through).
    """
    if len(contours) > 0:
        LENGTH = len(contours)
        status = np.zeros((LENGTH, 1))
        for i, contour1 in enumerate(contours):
            x = i
            if i != LENGTH - 1:
                # compare contour i against every later contour
                for j, contour2 in enumerate(contours[i + 1:]):
                    x = x + 1
                    if find_if_close(contour1, contour2, threshold):
                        # close pair: collapse both onto the smaller label
                        val = min(status[i], status[x])
                        status[x] = status[i] = val
                    else:
                        # far pair sharing a label: push x into its own group
                        if status[x] == status[i]:
                            status[x] = i + 1
        combined = []
        maximum = int(status.max()) + 1
        for label in range(maximum):
            pos = np.where(status == label)[0]
            if pos.size != 0:
                # np.vstack requires a concrete sequence — passing a
                # generator is deprecated/rejected in modern NumPy
                cont = np.vstack([contours[k] for k in pos])
                hull = cv2.convexHull(cont)
                combined.append(hull)
        return combined
def find_if_close(contour1, contour2, distance):
    """Return True if any point of `contour1` is within `distance`
    (Euclidean) of any point of `contour2`, else False.

    Contours are OpenCV-style point arrays, shape (N, 1, 2) or (N, 2).
    The original O(N*M) Python double loop is replaced by a single
    vectorised broadcast over all point pairs.

    Parameters:
        contour1, contour2: arrays of 2-D points.
        distance: strict upper bound on the closest-pair distance.
    """
    pts1 = np.asarray(contour1, dtype=np.float64).reshape(-1, 2)
    pts2 = np.asarray(contour2, dtype=np.float64).reshape(-1, 2)
    if pts1.size == 0 or pts2.size == 0:
        # original fell through returning None (falsy) for empty input;
        # make that an explicit False
        return False
    # pairwise differences: shape (N, M, 2) via broadcasting
    diffs = pts1[:, None, :] - pts2[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(axis=-1))
    return bool((dists < distance).any())
def trimArray(arrayIn, percent):
    """Shrink a 2-D activity map's bounding box by 'mass'.

    From each edge, discard columns/rows until `percent` percent of the
    array's total sum has been passed; the first index whose cumulative
    sum exceeds that cutoff becomes the new edge.

    Parameters:
        arrayIn: 2-D numpy array of non-negative values (activity map).
        percent: percentage of the total sum to trim from each side.

    Returns:
        (xstart, xfinish, ystart, yfinish) as plain ints, usable as
        arrayIn[ystart:yfinish, xstart:xfinish].

    Raises:
        ValueError if the cutoff is never exceeded (e.g. an all-zero
        array), matching the original argwhere(...).min() behaviour.
    """
    total_sum = arrayIn.sum()
    # amount of 'mass' allowed outside the box on each side
    cutoff = percent * total_sum / 100.0

    def _first_over(cum_sum):
        # index of the first cumulative-sum entry exceeding the cutoff
        return int(np.argwhere(cum_sum > cutoff).min())

    # x axis: sum down the columns, trim from the left...
    col_sums = arrayIn.sum(0)
    xstart = _first_over(col_sums.cumsum())
    print("New X start is " + str(xstart))
    # ...and from the right, by scanning the reversed column sums
    xfinish = len(col_sums) - _first_over(col_sums[::-1].cumsum())
    print("New X end is " + str(xfinish))
    # y axis: same procedure with the row sums
    row_sums = arrayIn.sum(1)
    ystart = _first_over(row_sums.cumsum())
    print("New Y start is " + str(ystart))
    yfinish = len(row_sums) - _first_over(row_sums[::-1].cumsum())
    print("New Y end is " + str(yfinish))
    return (xstart, xfinish, ystart, yfinish)
# Script entry point: run the interactive motion-detection pipeline.
if __name__=="__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.