Created
August 25, 2016 19:04
-
-
Save czarrar/adb275a070d30a783df65c10c590adad to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 13, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"%matplotlib inline\n", | |
"from pathlib import Path\n", | |
"import menpo.io as mio\n", | |
"import os\n", | |
"import numpy as np" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 2, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
# Modified landmark resolver for 300VW.
def vid_landmark_resolver(path, frame_number, paths_callable=None):
    """Resolve the landmark file(s) for one frame of a 300VW video.

    300VW stores per-frame annotations beside the video as
    ``annot/000001.pts`` etc. (1-based index, zero-padded to 6 digits).

    Parameters
    ----------
    path : ``Path`` of the video file (e.g. ``.../001/vid.avi``).
    frame_number : 0-based frame index within the video.
    paths_callable : callable mapping a glob pattern to an iterable of
        matching paths. Defaults to ``mio.landmark_file_paths``, resolved
        lazily inside the body so the function can be defined and tested
        without ``mio`` in scope.

    Returns
    -------
    dict mapping the upper-cased extension (e.g. ``'PTS'``) to its path.
    """
    if paths_callable is None:
        paths_callable = mio.landmark_file_paths
    # Compose the glob from explicit components: Path.with_name() raises
    # ValueError for a name containing a '/' separator, so the previous
    # with_name('annot/%06i.*') form breaks on standard pathlib.
    pattern = path.parent / 'annot' / ('%06i.*' % (frame_number + 1))
    # Key every asset found for this frame by its (upper-cased) extension.
    return {p.suffix[1:].upper(): p for p in paths_callable(pattern)}
] | |
}, | |
{ | |
"cell_type": "markdown", | |
"metadata": {}, | |
"source": [ | |
"## Load the training data" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 6, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"0\n", | |
"1\n", | |
"2\n", | |
"3\n", | |
"4\n", | |
"5\n", | |
"6\n", | |
"7\n", | |
"8\n", | |
"9\n", | |
"10\n", | |
"11\n", | |
"12\n", | |
"13\n", | |
"14\n", | |
"15\n", | |
"16\n", | |
"17\n", | |
"18\n", | |
"19\n", | |
"20\n", | |
"21\n", | |
"22\n", | |
"23\n", | |
"24\n", | |
"25\n", | |
"26\n", | |
"27\n", | |
"28\n", | |
"29\n", | |
"30\n", | |
"31\n", | |
"32\n", | |
"33\n", | |
"34\n", | |
"35\n" | |
] | |
} | |
], | |
"source": [ | |
# Load the 300VW training videos, keeping every FRAME_STEP-th frame to
# bound memory (each video is imported lazily by menpo).
FRAME_STEP = 5          # temporal subsampling factor
CROP_PROPORTION = 0.1   # margin kept around the landmarks when cropping

videos = []
for j, video in enumerate(mio.import_videos(
        '/projects/face_databases/300VW/0*/vid.avi',  # NOTE(review): hardcoded absolute path — parameterize for portability
        landmark_resolver=vid_landmark_resolver)):
    print(j)  # progress indicator: index of the video being imported
    frames = []
    for ii in range(0, len(video), FRAME_STEP):
        frame = video[ii]
        # Crop tightly around the face to discard background pixels.
        frame = frame.crop_to_landmarks_proportion(CROP_PROPORTION)
        # Collapse RGB to a single luminosity channel if needed.
        if frame.n_channels == 3:
            frame = frame.as_greyscale(mode='luminosity')
        frames.append(frame)
    videos.append(frames)
] | |
}, | |
{ | |
"cell_type": "markdown", | |
"metadata": {}, | |
"source": [ | |
"At this point, the process is using 18.3% of the 32 GB of memory" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 14, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"from menpofit.aam import PatchAAM\n", | |
"from menpo.feature import double_igo, fast_dsift, hog\n", | |
"from menpofit.aam import LucasKanadeAAMFitter" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 15, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
# Flatten the nested per-video frame lists into one flat training list.
import itertools

training_frames = list(itertools.chain.from_iterable(videos))
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 17, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"data": { | |
"text/plain": [ | |
"12656" | |
] | |
}, | |
"execution_count": 17, | |
"metadata": {}, | |
"output_type": "execute_result" | |
} | |
], | |
"source": [ | |
"# Sanity check: total number of subsampled training frames (12656 here)\n", | |
"len(training_frames)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 18, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"# Remove the now-redundant nested videos list to free its memory;\n", | |
"# the frames themselves stay referenced via training_frames.\n", | |
"del videos" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 19, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"- Computing reference shape Computing batch 0\n", | |
"- Building models\n", | |
" - Scale 0: Building appearance model " | |
] | |
}, | |
{ | |
"ename": "MemoryError", | |
"evalue": "", | |
"output_type": "error", | |
"traceback": [ | |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", | |
"\u001b[1;31mMemoryError\u001b[0m Traceback (most recent call last)", | |
"\u001b[1;32m<ipython-input-19-93b0d4a10796>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mholistic_features\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mfast_dsift\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[0mdiagonal\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m150\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 7\u001b[1;33m \u001b[0mscales\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0.25\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m0.5\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m0.75\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1.0\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1.25\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 8\u001b[0m )\n\u001b[0;32m 9\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpofit/aam/base.pyc\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, images, group, holistic_features, reference_shape, diagonal, scales, patch_shape, patch_normalisation, shape_model_cls, max_shape_components, max_appearance_components, verbose, batch_size)\u001b[0m\n\u001b[0;32m 1137\u001b[0m \u001b[0mmax_shape_components\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmax_shape_components\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1138\u001b[0m \u001b[0mmax_appearance_components\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmax_appearance_components\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1139\u001b[1;33m shape_model_cls=shape_model_cls, batch_size=batch_size)\n\u001b[0m\u001b[0;32m 1140\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1141\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpofit/aam/base.pyc\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, images, group, holistic_features, reference_shape, diagonal, scales, transform, shape_model_cls, max_shape_components, max_appearance_components, verbose, batch_size)\u001b[0m\n\u001b[0;32m 137\u001b[0m \u001b[1;31m# Train AAM\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 138\u001b[0m self._train(images, increment=False, group=group, verbose=verbose,\n\u001b[1;32m--> 139\u001b[1;33m batch_size=batch_size)\n\u001b[0m\u001b[0;32m 140\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 141\u001b[0m def _train(self, images, increment=False, group=None,\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpofit/aam/base.pyc\u001b[0m in \u001b[0;36m_train\u001b[1;34m(self, images, increment, group, shape_forgetting_factor, appearance_forgetting_factor, verbose, batch_size)\u001b[0m\n\u001b[0;32m 181\u001b[0m \u001b[0mshape_forgetting_factor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshape_forgetting_factor\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 182\u001b[0m \u001b[0mappearance_forgetting_factor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mappearance_forgetting_factor\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 183\u001b[1;33m verbose=verbose)\n\u001b[0m\u001b[0;32m 184\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 185\u001b[0m def _train_batch(self, image_batch, increment=False, group=None,\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpofit/aam/base.pyc\u001b[0m in \u001b[0;36m_train_batch\u001b[1;34m(self, image_batch, increment, group, verbose, shape_forgetting_factor, appearance_forgetting_factor)\u001b[0m\n\u001b[0;32m 256\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 257\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mincrement\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 258\u001b[1;33m \u001b[0mappearance_model\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mPCAModel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mwarped_images\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 259\u001b[0m \u001b[1;31m# trim appearance model if required\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 260\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax_appearance_components\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpo/model/pca.pyc\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, samples, centre, n_samples, max_n_components, inplace, verbose)\u001b[0m\n\u001b[0;32m 1190\u001b[0m \u001b[1;31m# build a data matrix from all the samples\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1191\u001b[0m data, template = as_matrix(samples, length=n_samples,\n\u001b[1;32m-> 1192\u001b[1;33m return_template=True, verbose=verbose)\n\u001b[0m\u001b[0;32m 1193\u001b[0m \u001b[0mn_samples\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1194\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", | |
"\u001b[1;32m/home/zshehzad/.conda/envs/menpo/lib/python2.7/site-packages/menpo/math/linalg.pyc\u001b[0m in \u001b[0;36mas_matrix\u001b[1;34m(vectorizables, length, return_template, verbose)\u001b[0m\n\u001b[0;32m 131\u001b[0m \u001b[0mtemplate_vector\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtemplate\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 132\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 133\u001b[1;33m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlength\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_features\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtemplate_vector\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 134\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mverbose\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 135\u001b[0m print_dynamic('Allocated data matrix of size {} '\n", | |
"\u001b[1;31mMemoryError\u001b[0m: " | |
] | |
} | |
], | |
"source": [ | |
# Train a patch-based AAM on fast-DSIFT features.
#
# NOTE(review): building the appearance model over all ~12.6k frames at five
# scales previously allocated one dense data matrix and died with a
# MemoryError inside PCAModel/as_matrix (see traceback). Passing batch_size
# (a PatchAAM parameter, visible in its __init__ signature) makes menpofit
# build the models incrementally, bounding peak memory at the cost of an
# approximate (incremental) PCA.
aam_dsift = PatchAAM(
    training_frames,
    group='PTS',
    verbose=True,
    holistic_features=fast_dsift,
    diagonal=150,
    scales=(0.25, 0.5, 0.75, 1.0, 1.25),
    batch_size=1024  # frames processed per incremental training batch
)

# One n_shape entry per scale (five scales above).
fitter_dsift = LucasKanadeAAMFitter(aam_dsift,
                                    n_shape=[3, 3, 8, 12, 12],
                                    n_appearance=50)
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": null, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [] | |
} | |
], | |
"metadata": { | |
"kernelspec": { | |
"display_name": "Python 2", | |
"language": "python", | |
"name": "python2" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 2 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython2", | |
"version": "2.7.12" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 0 | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment