Skip to content

Instantly share code, notes, and snippets.

@takatakamanbou takatakamanbou/00_ex160310.md Secret
Last active Mar 16, 2016

Embed
What would you like to do?
# ex160310_solver.prototxt -- Caffe SGD solver configuration for the MNIST CNN.
# The train/test nets are the generated learning (L) and validation (V) prototxts.
train_net: "ex160310L.prototxt"
test_net: "ex160310V.prototxt"
# 100 test iterations x batch_size 100 (see the V net) = 10000 validation images per test pass,
# run every 500 training iterations.
test_iter: 100
test_interval: 500
# SGD with momentum and L2 weight decay; "inv" policy decays the learning
# rate polynomially with the iteration count (controlled by gamma and power).
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# log progress every 500 iterations; stop at 10000.
display: 500
max_iter: 10000
# snapshot weights/solver state every 5000 iterations to files prefixed "ex160310".
snapshot: 5000
snapshot_prefix: "ex160310"
# NOTE: solver_mode here is ignored when driven from pycaffe (see the training
# script, which calls caffe.set_mode_gpu() explicitly).
solver_mode: GPU
#solver_mode: CPU
# ex160310L.prototxt -- training network (generated by the NetSpec script below).
# LeNet-style MNIST CNN:
#   data -> conv1 -> pool1 -> conv2 -> pool2 -> ip1 -> relu1 -> ip2 -> loss
# Training data: MNIST train LMDB, batch size 64.
# scale 0.00390625 = 1/256 normalizes uint8 pixels into [0, 1).
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "../mnist_train_lmdb"
batch_size: 64
backend: LMDB
}
}
# conv1: 20 filters of 5x5, stride 1, Xavier-initialized weights.
# The two param blocks set per-blob LR multipliers (weights x1, biases x2,
# by Caffe's weights-then-biases convention).
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
# pool1: 2x2 max pooling, stride 2 (halves the spatial resolution).
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# conv2: 50 filters of 5x5, stride 1; same LR multipliers and fillers as conv1.
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
# pool2: second 2x2/stride-2 max pooling.
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# ip1: fully-connected layer with 500 outputs.
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
# relu1: in-place ReLU on ip1 (bottom == top).
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
# ip2: final fully-connected layer, 10 outputs (one per MNIST digit class).
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
inner_product_param {
num_output: 10
}
}
# loss: softmax + multinomial logistic loss over ip2 vs. the labels.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
# ex160310T.prototxt -- deploy/test network (generated by the NetSpec script below).
# Same conv/pool/ip trunk as the training net, but: input comes from an Input
# blob (batch 100 of 1x28x28 images) instead of LMDB, there are no labels, and
# the net ends in a plain Softmax producing class probabilities.
layer {
name: "data"
type: "Input"
top: "data"
input_param {
shape {
dim: 100
dim: 1
dim: 28
dim: 28
}
}
}
# conv1: 20 filters of 5x5, stride 1.  No lr_mult/filler blocks here --
# presumably weights are loaded from a training snapshot (typical deploy usage).
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
}
}
# pool1: 2x2 max pooling, stride 2.
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# conv2: 50 filters of 5x5, stride 1.
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
}
}
# pool2: second 2x2/stride-2 max pooling.
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# ip1: fully-connected layer, 500 outputs.
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
inner_product_param {
num_output: 500
}
}
# relu1: in-place ReLU on ip1.
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
# ip2: final fully-connected layer, 10 class scores.
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
inner_product_param {
num_output: 10
}
}
# softmax: converts the 10 scores into class probabilities (no loss layer here).
layer {
name: "softmax"
type: "Softmax"
bottom: "ip2"
top: "softmax"
}
# ex160310V.prototxt -- validation network (generated by the NetSpec script below).
# Same trunk as the training net but reads the MNIST test LMDB (batch 100,
# TEST phase) and adds an Accuracy layer next to the loss.
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "../mnist_test_lmdb"
batch_size: 100
backend: LMDB
}
}
# conv1: 20 filters of 5x5, stride 1 (weights shared with the training net
# through Caffe's train/test net pairing in the solver).
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
}
}
# pool1: 2x2 max pooling, stride 2.
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# conv2: 50 filters of 5x5, stride 1.
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
}
}
# pool2: second 2x2/stride-2 max pooling.
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# ip1: fully-connected layer, 500 outputs.
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
inner_product_param {
num_output: 500
}
}
# relu1: in-place ReLU on ip1.
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
# ip2: final fully-connected layer, 10 class scores.
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
inner_product_param {
num_output: 10
}
}
# loss: softmax cross-entropy against the labels.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
# accuracy: top-1 classification accuracy, reported during TEST passes only.
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
from __future__ import print_function
import caffe
from caffe import layers as caffeL
from caffe import params as caffeP
# Caffe phase selector per network kind:
# L = learning (TRAIN), V = validation (TEST), T = test/deploy (TEST).
phase = {
    'L': { 'phase': caffe.TRAIN },
    'V': { 'phase': caffe.TEST },
    'T': { 'phase': caffe.TEST },
}
# LMDB source directory per network kind (the deploy net 'T' reads no LMDB).
dirData = { 'L': '../mnist_train_lmdb', 'V': '../mnist_test_lmdb', 'T': '' }
# Mini-batch size per network kind.
bsize = { 'L': 64, 'V': 100, 'T': 100 }
# Extra keyword arguments forwarded to the learned layers, per network kind.
# Only the learning net 'L' carries per-blob LR multipliers and weight/bias
# initializers; the validation and deploy nets pass nothing extra.
params = {
    'L': {
        'param': [ { 'lr_mult': 1 }, { 'lr_mult': 2 } ],
        'weight_filler': { 'type': 'xavier' },
        'bias_filler': { 'type': 'constant' },
    },
    'V': {},
    'T': {},
}
### defining the CNN for learning (L), validation (V), test ( T )
#
def defineCNN( LT ):
    """Return a caffe.NetSpec for the MNIST CNN in the given phase.

    LT: 'L' (learning), 'V' (validation) or 'T' (test/deploy).
    The 'L'/'V' nets read LMDB data and end in SoftmaxWithLoss ('V' adds an
    Accuracy layer); the 'T' net takes an Input blob and ends in a Softmax.
    """
    cnn = caffe.NetSpec()
    ### data & label
    if LT in [ 'L', 'V' ]:
        # LMDB-backed data layer; scale 1.0/256 = 0.00390625 maps uint8 pixels into [0, 1)
        cnn.data, cnn.label = caffeL.Data( source = dirData[LT], batch_size = bsize[LT], backend = caffeP.Data.LMDB, ntop = 2, include = phase[LT], transform_param = dict( scale = 1.0/256 ) )
    else:
        # deploy net: no labels, input shape (batch, 1, 28, 28)
        cnn.data = caffeL.Input( shape = dict( dim = [bsize[LT], 1, 28, 28] ) )
    ### conv1 & pool1
    cnn.conv1 = caffeL.Convolution( cnn.data, kernel_size = 5, stride = 1, num_output = 20, **params[LT] )
    cnn.pool1 = caffeL.Pooling( cnn.conv1, pool = caffeP.Pooling.MAX, kernel_size = 2, stride = 2 )
    ### conv2 & pool2
    cnn.conv2 = caffeL.Convolution( cnn.pool1, kernel_size = 5, stride = 1, num_output = 50, **params[LT] )
    cnn.pool2 = caffeL.Pooling( cnn.conv2, pool = caffeP.Pooling.MAX, kernel_size = 2, stride = 2 )
    ### ip1 & relu1
    cnn.ip1 = caffeL.InnerProduct( cnn.pool2, num_output = 500, **params[LT] )
    cnn.relu1 = caffeL.ReLU( cnn.ip1, in_place = True )
    ### ip2
    cnn.ip2 = caffeL.InnerProduct( cnn.relu1, num_output = 10 )
    ### softmax, loss & accuracy
    # bugfix: compare with '==' rather than 'is' -- identity comparison against
    # string literals relies on CPython interning and raises SyntaxWarning on
    # Python >= 3.8.
    if LT == 'L':
        cnn.loss = caffeL.SoftmaxWithLoss( cnn.ip2, cnn.label )
    elif LT == 'V':
        cnn.loss = caffeL.SoftmaxWithLoss( cnn.ip2, cnn.label )
        cnn.accuracy = caffeL.Accuracy( cnn.ip2, cnn.label, include = phase[LT] )
    else:
        cnn.softmax = caffeL.Softmax( cnn.ip2 )
    return cnn
if __name__ == "__main__":
    # Emit the three network definitions: learning, validation and test.
    for netKind in ( 'L', 'V', 'T' ):
        spec = defineCNN( netKind )
        fn = 'ex160310' + netKind + '.prototxt'
        with open( fn, 'w' ) as fout:
            print( spec.to_proto(), file = fout )
from __future__ import print_function
import caffe
# Solver configuration written alongside the generated network prototxts.
fnSolver = 'ex160310_solver.prototxt'
# necessary for using GPU because "solver_mode" in the prototxt file is ignored
caffe.set_mode_gpu()
solver = caffe.SGDSolver( fnSolver )
# Rather than one monolithic solver.solve(), advance in 100-iteration chunks
# and print the current iteration count before each chunk.
#solver.solve()
for _ in range( 110 ):
    print( '#', solver.iter )
    solver.step( 100 )
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.