import cPickle as pickle
from datetime import datetime
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from lasagne import layers
from sklearn.utils import shuffle
import theano
import theano.tensor as T
import lasagne
import skimage.transform
import sklearn.cross_validation
#import cv2
np.random.seed(42)
BATCH_SIZE = 16
DATA_DIR = '/home/ratneshm/projects/powerline_synthetic_data_generator/data/'
from lasagne.layers import DenseLayer
from lasagne.layers import DropoutLayer
from lasagne.layers import InputLayer
from lasagne.layers import LocalResponseNormalization2DLayer as NormLayer
from lasagne.layers import NonlinearityLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.nonlinearities import softmax
from lasagne.utils import floatX
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import NeuralNet
def float32(k):
    return np.cast['float32'](k)
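# The network below follows the VGG_CNN_S layout (whose pretrained weights are
# loaded further down), with the final layer replaced by a 4-unit linear output
# so the net can be trained as a regressor.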
net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', ConvLayer),
        ('norm1', NormLayer),
        ('pool1', PoolLayer),
        ('conv2', ConvLayer),
        ('pool2', PoolLayer),
        ('conv3', ConvLayer),
        ('conv4', ConvLayer),
        ('conv5', ConvLayer),
        ('pool5', PoolLayer),
        ('fc6', DenseLayer),
        ('drop6', DropoutLayer),
        ('fc7', DenseLayer),
        ('drop7', DropoutLayer),
        ('fc8', DenseLayer),
    ],
    input_shape=(None, 3, 224, 224),
    conv1_num_filters=96, conv1_filter_size=(7, 7), conv1_stride=2,
    pool1_pool_size=(3, 3), pool1_stride=3,
    conv2_num_filters=256, conv2_filter_size=(5, 5),
    pool2_pool_size=(2, 2), pool2_stride=2,
    conv3_num_filters=512, conv3_filter_size=(3, 3), conv3_pad=1,
    conv4_num_filters=512, conv4_filter_size=(3, 3), conv4_pad=1,
    conv5_num_filters=512, conv5_filter_size=(3, 3), conv5_pad=1,
    pool5_pool_size=(3, 3), pool5_stride=3,
    fc6_num_units=4096,
    drop6_p=0.5,
    fc7_num_units=4096,
    drop7_p=0.5,
    fc8_num_units=4, fc8_nonlinearity=None,
    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),
    regression=True,
    batch_iterator_train=BatchIterator(batch_size=16),
    #on_epoch_finished=[
    #    AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
    #    AdjustVariable('update_momentum', start=0.9, stop=0.999),
    #    EarlyStopping(patience=200),
    #    ],
    max_epochs=3000,
    verbose=1,
)
d_vggcnn = pickle.load(open('/home/ratneshm/projects/vgg_weights/vgg_cnn_s.pkl', 'rb'))
# image_preprocess adapted from https://github.com/ebenolson/pydata2015/blob/master/3%20-%20Convolutional%20Networks/Finetuning%20for%20Image%20Classifica$
IMAGE_MEAN = d_vggcnn['mean image']
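# A possible way (not in the original gist) to initialise the conv/fc layers
# from the pretrained VGG_CNN_S parameters, skipping fc8, whose shape differs
# here (1000 ImageNet classes vs. 4 regression outputs). This assumes the
# pickle stores its parameter arrays under the 'values' key, as in the
# pydata2015 notebook referenced above.
# net.initialize()
# lasagne.layers.set_all_param_values(net.layers_['fc7'], d_vggcnn['values'][:-2])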
def image_preprocess(fn, ext='jpg'):
    im = plt.imread(fn, ext)
    # Resize so smallest dim = 256, preserving aspect ratio
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]  # Central crop to 224x224
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)  # Shuffle axes to c01 (channel, height, width)
    im = im[:3]  # discard alpha channel if present
    # im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)  # Convert to BGR
    im = im[::-1, :, :]  # RGB -> BGR by reversing the channel axis
    im = im - IMAGE_MEAN
    return floatX(im[np.newaxis])
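# Each call to image_preprocess yields a (1, 3, 224, 224) array in BGR order,
# mean-subtracted and cast to theano.config.floatX, so the per-image results
# can be stacked with np.concatenate below.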
X = []
y = np.genfromtxt(DATA_DIR + 'labels.csv', delimiter=',')  # labels  #TODO scale to [-1,1]
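# One way to address the TODO above (a sketch, not in the original gist):
# rescale each label column to [-1, 1] from its own min/max, assuming every
# column has a nonzero range.
# y = 2.0 * (y - y.min(axis=0)) / (y.max(axis=0) - y.min(axis=0)) - 1.0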
# sorted() gives a deterministic file order (os.listdir order is arbitrary);
# this assumes the rows of labels.csv follow the sorted filename order.
for image_file in sorted(os.listdir(DATA_DIR + 'images/')):
    im = image_preprocess(DATA_DIR + 'images/' + image_file)
    X.append(im)
X = np.concatenate(X)
X = X.astype('float32')
y = y.astype('float32')
print("X.shape", X.shape)
print("y.shape", y.shape)
print(y)
###################### train ######################
net.fit(X, y)
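# Optional follow-up (not in the original gist): persist the learned parameters
# with nolearn's save_params_to so the model can be reloaded later
# (the filename is illustrative).
# net.save_params_to('vgg_powerline_regression_weights.pkl')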