Deploy Caffe prototxt for oxford102 model https://gist.github.com/jgoode21/0179e52305ca768a601f
name: "Oxford102CaffeNet" | |
input: "data" | |
input_dim: 10 | |
input_dim: 3 | |
input_dim: 227 | |
input_dim: 227 | |
force_backward: true | |
layers { | |
name: "conv1" | |
type: CONVOLUTION | |
bottom: "data" | |
top: "conv1" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
convolution_param { | |
num_output: 96 | |
kernel_size: 11 | |
stride: 4 | |
weight_filler { | |
type: "gaussian" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layers { | |
name: "relu1" | |
type: RELU | |
bottom: "conv1" | |
top: "conv1" | |
} | |
layers { | |
name: "pool1" | |
type: POOLING | |
bottom: "conv1" | |
top: "pool1" | |
pooling_param { | |
pool: MAX | |
kernel_size: 3 | |
stride: 2 | |
} | |
} | |
layers { | |
name: "norm1" | |
type: LRN | |
bottom: "pool1" | |
top: "norm1" | |
lrn_param { | |
local_size: 5 | |
alpha: 0.0001 | |
beta: 0.75 | |
} | |
} | |
layers { | |
name: "conv2" | |
type: CONVOLUTION | |
bottom: "norm1" | |
top: "conv2" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
convolution_param { | |
num_output: 256 | |
pad: 2 | |
kernel_size: 5 | |
group: 2 | |
weight_filler { | |
type: "gaussian" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 1 | |
} | |
} | |
} | |
layers { | |
name: "relu2" | |
type: RELU | |
bottom: "conv2" | |
top: "conv2" | |
} | |
layers { | |
name: "pool2" | |
type: POOLING | |
bottom: "conv2" | |
top: "pool2" | |
pooling_param { | |
pool: MAX | |
kernel_size: 3 | |
stride: 2 | |
} | |
} | |
layers { | |
name: "norm2" | |
type: LRN | |
bottom: "pool2" | |
top: "norm2" | |
lrn_param { | |
local_size: 5 | |
alpha: 0.0001 | |
beta: 0.75 | |
} | |
} | |
layers { | |
name: "conv3" | |
type: CONVOLUTION | |
bottom: "norm2" | |
top: "conv3" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
convolution_param { | |
num_output: 384 | |
pad: 1 | |
kernel_size: 3 | |
weight_filler { | |
type: "gaussian" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layers { | |
name: "relu3" | |
type: RELU | |
bottom: "conv3" | |
top: "conv3" | |
} | |
layers { | |
name: "conv4" | |
type: CONVOLUTION | |
bottom: "conv3" | |
top: "conv4" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
convolution_param { | |
num_output: 384 | |
pad: 1 | |
kernel_size: 3 | |
group: 2 | |
weight_filler { | |
type: "gaussian" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 1 | |
} | |
} | |
} | |
layers { | |
name: "relu4" | |
type: RELU | |
bottom: "conv4" | |
top: "conv4" | |
} | |
layers { | |
name: "conv5" | |
type: CONVOLUTION | |
bottom: "conv4" | |
top: "conv5" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
convolution_param { | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
group: 2 | |
weight_filler { | |
type: "gaussian" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 1 | |
} | |
} | |
} | |
layers { | |
name: "relu5" | |
type: RELU | |
bottom: "conv5" | |
top: "conv5" | |
} | |
layers { | |
name: "pool5" | |
type: POOLING | |
bottom: "conv5" | |
top: "pool5" | |
pooling_param { | |
pool: MAX | |
kernel_size: 3 | |
stride: 2 | |
} | |
} | |
layers { | |
name: "fc6" | |
type: INNER_PRODUCT | |
bottom: "pool5" | |
top: "fc6" | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
inner_product_param { | |
num_output: 4096 | |
weight_filler { | |
type: "gaussian" | |
std: 0.005 | |
} | |
bias_filler { | |
type: "constant" | |
value: 1 | |
} | |
} | |
} | |
layers { | |
name: "relu6" | |
type: RELU | |
bottom: "fc6" | |
top: "fc6" | |
} | |
layers { | |
name: "drop6" | |
type: DROPOUT | |
bottom: "fc6" | |
top: "fc6" | |
dropout_param { | |
dropout_ratio: 0.5 | |
} | |
} | |
layers { | |
name: "fc7" | |
type: INNER_PRODUCT | |
bottom: "fc6" | |
top: "fc7" | |
# Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layer | |
blobs_lr: 1 | |
blobs_lr: 2 | |
weight_decay: 1 | |
weight_decay: 0 | |
inner_product_param { | |
num_output: 4096 | |
weight_filler { | |
type: "gaussian" | |
std: 0.005 | |
} | |
bias_filler { | |
type: "constant" | |
value: 1 | |
} | |
} | |
} | |
layers { | |
name: "relu7" | |
type: RELU | |
bottom: "fc7" | |
top: "fc7" | |
} | |
layers { | |
name: "drop7" | |
type: DROPOUT | |
bottom: "fc7" | |
top: "fc7" | |
dropout_param { | |
dropout_ratio: 0.5 | |
} | |
} | |
layers { | |
name: "fc8_oxford_102" | |
type: INNER_PRODUCT | |
bottom: "fc7" | |
top: "fc8_oxford_102" | |
  # blobs_lr is set higher than for the other layers because this layer starts from random initialization while the others are already trained
  blobs_lr: 10
  blobs_lr: 20
  weight_decay: 1
  weight_decay: 0
  inner_product_param {
    num_output: 102
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layers {
  name: "prob"
  type: SOFTMAX
  bottom: "fc8_oxford_102"
  top: "prob"
}
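
For reference, here is a minimal pycaffe sketch of how this deploy prototxt might be used for inference. The file names below (the fine-tuned weights, the ImageNet mean file, and the test image) are placeholders and not part of this gist; substitute your own paths.

import numpy as np
import caffe

# All paths here are assumptions -- point them at your copies of this deploy
# prototxt, the fine-tuned Oxford-102 weights, and the ImageNet mean array.
MODEL_FILE = 'deploy.prototxt'                    # the prototxt above
PRETRAINED = 'oxford102_caffenet.caffemodel'      # hypothetical fine-tuned weights
MEAN_FILE = 'ilsvrc_2012_mean.npy'                # ImageNet mean as a (3, 256, 256) array

caffe.set_mode_cpu()

# caffe.Classifier resizes inputs to image_dims, then oversamples 10 crops at the
# net's 227x227 input size (matching input_dim: 10 above) and averages the outputs.
net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                       mean=np.load(MEAN_FILE).mean(1).mean(1),  # per-channel mean
                       channel_swap=(2, 1, 0),   # RGB -> BGR, as CaffeNet expects
                       raw_scale=255,            # load_image gives [0, 1]; model expects [0, 255]
                       image_dims=(256, 256))

image = caffe.io.load_image('flower.jpg')         # hypothetical input image
probs = net.predict([image])[0]                   # output of the "prob" softmax, 102 classes
print('predicted class index:', probs.argmax())

If you prefer a raw forward pass instead, caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST) together with a manually configured caffe.io.Transformer works as well; force_backward: true in the deploy net only matters if you also want gradients with respect to the input.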