@saudet
Last active June 3, 2016 01:42
Simple Caffe model replicating the configuration used in Deeplearning4j's MLPMnistSingleLayerExample (a single-hidden-layer MLP on MNIST).
# examples/mnist/simple_solver.prototxt
# The train/test net protocol buffer definition
net: "examples/mnist/simple_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# With a test batch size of 128, 78 test iterations cover 9,984 of the
# 10,000 testing images.
test_iter: 78
# Carry out testing every 7035 training iterations, i.e. once, after the
# final training iteration.
test_interval: 7035
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.006
momentum: 0.9
weight_decay: 1e-4
# The learning rate policy: "fixed" keeps base_lr constant, so gamma and power are unused
lr_policy: "fixed"
gamma: 0
power: 0
# Display every 100 iterations
display: 100
# The maximum number of iterations: 15 epochs of 469 iterations (60,000 training images / batch size 128, rounded up)
max_iter: 7035
# Snapshot the trained model once, at the final iteration
snapshot: 7035
snapshot_prefix: "examples/mnist/simple"
# solver mode: CPU or GPU
solver_mode: CPU
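# Use Nesterov's accelerated gradient as the solver type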
type: "Nesterov"
name: "Simple"
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    # Scale raw pixel values by 1/256 into the [0, 1) range.
    scale: 0.00390625
  }
  data_param {
    source: "examples/mnist/mnist_train_lmdb"
    batch_size: 128
    backend: LMDB
  }
}
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "examples/mnist/mnist_test_lmdb"
    batch_size: 128
    backend: LMDB
  }
}
# Fully connected hidden layer with 1000 units and Xavier weight initialization.
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 1000
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
# Fully connected output layer with one unit per digit class.
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# Report classification accuracy, computed only in the TEST phase.
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
# Softmax + multinomial logistic loss used for training.
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
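
To confirm that the network is the intended single-hidden-layer MLP (784 inputs, 1000 hidden units with ReLU, 10 outputs), the prototxt can be loaded through pycaffe and its blob and parameter shapes printed. This is only a sketch: it assumes the Caffe Python bindings are importable and that it is run from the Caffe root directory so the LMDB paths in the data layers resolve.

import caffe

caffe.set_mode_cpu()
# Load the net in TEST phase so the mnist_test_lmdb data layer is used.
net = caffe.Net('examples/mnist/simple_train_test.prototxt', caffe.TEST)

# Activations: data (128, 1, 28, 28) -> ip1 (128, 1000) -> ip2 (128, 10)
for name, blob in net.blobs.items():
    print(name, blob.data.shape)

# Learnable parameters: ip1 holds 1000x784 weights plus 1000 biases,
# ip2 holds 10x1000 weights plus 10 biases.
for name, params in net.params.items():
    print(name, [p.data.shape for p in params])
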
#!/usr/bin/env sh
./build/tools/caffe train --solver=examples/mnist/simple_solver.prototxt
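
The same run can be driven from Python instead of the caffe binary. A rough equivalent of the command above, assuming pycaffe is built and the script is launched from the Caffe root:

import caffe

caffe.set_mode_cpu()  # matches solver_mode: CPU in the solver prototxt
# get_solver() parses the solver definition and instantiates the solver class
# named by its "type" field, i.e. the Nesterov solver here.
solver = caffe.get_solver('examples/mnist/simple_solver.prototxt')
solver.solve()        # runs all 7035 iterations, testing and snapshotting at the end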