View loadvgg.py
# How to load the model
def build_model(img_width=224, img_height=224):
    from keras.models import Sequential
    from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D, Activation
    model = Sequential()
    # Theano-style channel ordering: input is (channels, width, height)
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
    # ReLU is set on the layer itself, so no separate Activation layer is needed
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
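A usage sketch, assuming the remaining VGG-16 layers are added in the same way and that build_model returns the finished model; the vgg16_weights.h5 filename is an assumption, not part of the gist:
    # Build the network and load pretrained weights (filename is an assumption)
    model = build_model(img_width=224, img_height=224)
    model.load_weights('vgg16_weights.h5')  # Keras 1.x-style HDF5 weights file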
View s3_multipart_upload.py
#!/usr/bin/env python
"""Split a large file into multiple pieces for upload to S3.

S3 only supports 5GB files for uploading directly, so for larger CloudBioLinux
box images we need to use boto's multipart file support.
This parallelizes the task over available cores using multiprocessing.

Usage:
  s3_multipart_upload.py <file_to_transfer> <bucket_name> [<s3_key_name>]
"""
import logging
import math
import mimetypes
from multiprocessing import Pool
import os
from boto.s3.connection import S3Connection
from filechunkio import FileChunkIO
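A minimal single-process sketch of the multipart logic, using the imports above; the helper name and 50MB part size are assumptions, and the gist itself fans the parts out over a multiprocessing.Pool rather than a plain loop:
def upload_multipart(path, bucket_name, key_name, part_size=50 * 1024 * 1024):
    # Open a connection (credentials come from the environment / boto config)
    # and start a multipart upload for the target key.
    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)
    mp = bucket.initiate_multipart_upload(key_name)
    size = os.path.getsize(path)
    part_count = int(math.ceil(size / float(part_size)))
    for i in range(part_count):
        offset = i * part_size
        nbytes = min(part_size, size - offset)
        # FileChunkIO exposes one slice of the file as a file-like object,
        # so each part is streamed without reading the whole file into memory.
        with FileChunkIO(path, 'r', offset=offset, bytes=nbytes) as fp:
            mp.upload_part_from_file(fp, part_num=i + 1)
    mp.complete_upload()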
View install inotify-tools on centos.sh
wget http://cloud.github.com/downloads/rvoicilas/inotify-tools/inotify-tools-3.14.tar.gz
tar -zxvf inotify-tools-3.14.tar.gz
cd inotify-tools-3.14
./configure
make
make install
updatedb
View gist:07a8ada63d56d847c9c8
ls | awk '{print "[](results/" $1 ")"}'
View Blogs to follow
http://antirez.com/latest/0
View vimrc_backup
set backup
set backupdir=~/.vim-tmp,~/.tmp,~/tmp,/var/tmp,/tmp
set backupskip=/tmp/*,/private/tmp/*
set directory=~/.vim-tmp,~/.tmp,~/tmp,/var/tmp,/tmp
set writebackup
View Extract patches from an image
import os
from optparse import OptionParser
import cPickle
import skimage
import numpy as np
from scipy import misc
from distutils.dir_util import mkpath
def loadImage(imgpath):
try:
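The preview cuts off inside loadImage. As a generic illustration of the patch-extraction step the gist is named for (extract_patches below is a hypothetical helper using the numpy import above, not the gist's own code):
def extract_patches(img, patch_size=32, stride=32):
    # Slide a square window over a NumPy image array and collect the patches.
    patches = []
    h, w = img.shape[:2]
    for y in range(0, h - patch_size + 1, stride):
        for x in range(0, w - patch_size + 1, stride):
            patches.append(img[y:y + patch_size, x:x + patch_size])
    return np.asarray(patches)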
View python scipy install instruction
sudo apt-get install gfortran libopenblas-dev liblapack-dev
sudo pip install numpy scipy
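A quick sanity check after the install, run from a Python prompt (just prints the installed versions):
import numpy
import scipy
print(numpy.__version__)
print(scipy.__version__)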