VGG-19 with a batch normalization layer after each convolution layer
#VGGNet19-BN
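# VGG-19 topology with a BatchNorm layer inserted after every convolution.
# Note: this prototxt appears to target the NVIDIA DIGITS / NVCaffe dialect. The
# scale_filler/bias_filler fields inside batch_norm_param are NVCaffe extensions;
# stock BVLC Caffe does not parse them and would normally pair a BatchNorm layer
# (with lr_mult: 0 params) with a separate Scale layer instead. Also note that the
# std value given under the "msra" weight_filler is ignored, since the MSRA filler
# derives its own standard deviation from the fan-in.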
name: "VGGNet19"
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
transform_param {
mirror: true
crop_size: 224
}
data_param {
batch_size: 32
}
include { stage: "train" }
}
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
transform_param {
mirror: false
crop_size: 224
}
data_param {
batch_size: 16
}
include { stage: "val" }
}
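# Block 1: two 3x3 convolutions with 64 outputs, each followed by BatchNorm + ReLU,
# then 2x2/2 max pooling (224 -> 112).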
layer {
bottom:"data"
top:"conv1_1"
name:"conv1_1"
type:"Convolution"
convolution_param {
num_output:64
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv1_1"
name: "conv1_1/bn"
top: "conv1_1/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv1_1/bn"
top:"conv1_1/bn"
name:"relu1_1"
type:"ReLU"
}
layer {
bottom:"conv1_1/bn"
top:"conv1_2"
name:"conv1_2"
type:"Convolution"
convolution_param {
num_output:64
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv1_2"
name: "conv1_2/bn"
top: "conv1_2/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv1_2/bn"
top:"conv1_2/bn"
name:"relu1_2"
type:"ReLU"
}
layer {
bottom:"conv1_2/bn"
top:"pool1"
name:"pool1"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size:2
stride:2
}
}
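# Block 2: two 3x3 convolutions with 128 outputs, each followed by BatchNorm + ReLU,
# then 2x2/2 max pooling (112 -> 56).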
layer {
bottom:"pool1"
top:"conv2_1"
name:"conv2_1"
type:"Convolution"
convolution_param {
num_output:128
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv2_1"
name: "conv2_1/bn"
top: "conv2_1/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv2_1/bn"
top:"conv2_1/bn"
name:"relu2_1"
type:"ReLU"
}
layer {
bottom:"conv2_1/bn"
top:"conv2_2"
name:"conv2_2"
type:"Convolution"
convolution_param {
num_output:128
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv2_2"
name: "conv2_2/bn"
top: "conv2_2/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv2_2/bn"
top:"conv2_2/bn"
name:"relu2_2"
type:"ReLU"
}
layer {
bottom:"conv2_2/bn"
top:"pool2"
name:"pool2"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size:2
stride:2
}
}
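# Block 3: four 3x3 convolutions with 256 outputs, each followed by BatchNorm + ReLU,
# then 2x2/2 max pooling (56 -> 28).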
layer {
bottom:"pool2"
top:"conv3_1"
name: "conv3_1"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv3_1"
name: "conv3_1/bn"
top: "conv3_1/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv3_1/bn"
top:"conv3_1/bn"
name:"relu3_1"
type:"ReLU"
}
layer {
bottom:"conv3_1/bn"
top:"conv3_2"
name:"conv3_2"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv3_2"
name: "conv3_2/bn"
top: "conv3_2/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv3_2/bn"
top:"conv3_2/bn"
name:"relu3_2"
type:"ReLU"
}
layer {
bottom:"conv3_2/bn"
top:"conv3_3"
name:"conv3_3"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv3_3"
name: "conv3_3/bn"
top: "conv3_3/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv3_3/bn"
top:"conv3_3/bn"
name:"relu3_3"
type:"ReLU"
}
layer {
bottom:"conv3_3/bn"
top:"conv3_4"
name:"conv3_4"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv3_4"
name: "conv3_4/bn"
top: "conv3_4/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv3_4/bn"
top:"conv3_4/bn"
name:"relu3_4"
type:"ReLU"
}
layer {
bottom:"conv3_4/bn"
top:"pool3"
name:"pool3"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
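# Block 4: four 3x3 convolutions with 512 outputs, each followed by BatchNorm + ReLU,
# then 2x2/2 max pooling (28 -> 14).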
layer {
bottom:"pool3"
top:"conv4_1"
name:"conv4_1"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv4_1"
name: "conv4_1/bn"
top: "conv4_1/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv4_1/bn"
top:"conv4_1/bn"
name:"relu4_1"
type:"ReLU"
}
layer {
bottom:"conv4_1/bn"
top:"conv4_2"
name:"conv4_2"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv4_2"
name: "conv4_2/bn"
top: "conv4_2/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv4_2/bn"
top:"conv4_2/bn"
name:"relu4_2"
type:"ReLU"
}
layer {
bottom:"conv4_2/bn"
top:"conv4_3"
name:"conv4_3"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv4_3"
name: "conv4_3/bn"
top: "conv4_3/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv4_3/bn"
top:"conv4_3/bn"
name:"relu4_3"
type:"ReLU"
}
layer {
bottom:"conv4_3/bn"
top:"conv4_4"
name:"conv4_4"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv4_4"
name: "conv4_4/bn"
top: "conv4_4/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv4_4/bn"
top:"conv4_4/bn"
name:"relu4_4"
type:"ReLU"
}
layer {
bottom:"conv4_4/bn"
top:"pool4"
name:"pool4"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
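# Block 5: four 3x3 convolutions with 512 outputs, each followed by BatchNorm + ReLU,
# then 2x2/2 max pooling (14 -> 7).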
layer {
bottom:"pool4"
top:"conv5_1"
name:"conv5_1"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv5_1"
name: "conv5_1/bn"
top: "conv5_1/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv5_1/bn"
top:"conv5_1/bn"
name:"relu5_1"
type:"ReLU"
}
layer {
bottom:"conv5_1/bn"
top:"conv5_2"
name:"conv5_2"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv5_2"
name: "conv5_2/bn"
top: "conv5_2/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv5_2/bn"
top:"conv5_2/bn"
name:"relu5_2"
type:"ReLU"
}
layer {
bottom:"conv5_2/bn"
top:"conv5_3"
name:"conv5_3"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv5_3"
name: "conv5_3/bn"
top: "conv5_3/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv5_3/bn"
top:"conv5_3/bn"
name:"relu5_3"
type:"ReLU"
}
layer {
bottom:"conv5_3/bn"
top:"conv5_4"
name:"conv5_4"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom: "conv5_4"
name: "conv5_4/bn"
top: "conv5_4/bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom:"conv5_4/bn"
top:"conv5_4/bn"
name:"relu5_4"
type:"ReLU"
}
layer {
bottom:"conv5_4/bn"
top:"pool5"
name:"pool5"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
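# Classifier head: fc6 and fc7 are 4096-unit fully connected layers, each followed by
# ReLU and 50% dropout; fc8 maps to the class scores.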
layer {
bottom:"pool5"
top:"fc6"
name:"fc6"
type:"InnerProduct"
inner_product_param {
num_output: 4096
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom:"fc6"
top:"fc6"
name:"relu6"
type:"ReLU"
}
layer {
bottom:"fc6"
top:"fc6"
name:"drop6"
type:"Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom:"fc6"
top:"fc7"
name:"fc7"
type:"InnerProduct"
inner_product_param {
num_output: 4096
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
layer {
bottom:"fc7"
top:"fc7"
name:"relu7"
type:"ReLU"
}
layer {
bottom:"fc7"
top:"fc7"
name:"drop7"
type:"Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom:"fc7"
top:"fc8"
name:"fc8"
type:"InnerProduct"
inner_product_param {
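# num_output is left unset on this final layer: set it to the number of classes in
# your dataset (DIGITS typically fills it in automatically when it is omitted here).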
#num_output: 0
weight_filler {
type: "msra"
std: 0.0005
}
bias_filler {
type: "constant"
value: 0
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
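# Outputs: top-1 accuracy on the validation stage, softmax cross-entropy loss for
# training/validation (excluded at deploy), and plain softmax probabilities at deploy.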
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "fc8"
top: "softmax"
include { stage: "deploy" }
}