@victorhcm
Last active October 30, 2017
FractalNet-34 prototxt for visualization at http://dgschwend.github.io/netscope
name: "FractalNet-34"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 224
input_dim: 224
# Input size: 32
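# Naming convention used throughout: conv{column}_{index}. Column 0 is the
# shallowest path of each block (one conv per block), column 2 the deepest
# (four convs per block); the "join_*" Concat layers stand in for FractalNet's
# joins (see the author's note at the end of the file).
# Block 1: spatial size 32, 3x3 convs, num_output 64, dropout_ratio 0.0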
layer {
bottom: "data"
top: "conv2_0"
name: "conv2_0"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_0"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_0_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_0"
type: "Dropout"
bottom: "conv2_0"
top: "conv2_0"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv2_0"
type: "BatchNorm"
bottom: "conv2_0"
top: "conv2_0"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_0"
type: "ReLU"
bottom: "conv2_0"
top: "conv2_0"
}
layer {
bottom: "conv2_0"
top: "conv2_1"
name: "conv2_1"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_1"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_1_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_1"
type: "Dropout"
bottom: "conv2_1"
top: "conv2_1"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv2_1"
type: "BatchNorm"
bottom: "conv2_1"
top: "conv2_1"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
bottom: "data"
top: "conv1_0"
name: "conv1_0"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_0"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_0_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_0"
type: "Dropout"
bottom: "conv1_0"
top: "conv1_0"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv1_0"
type: "BatchNorm"
bottom: "conv1_0"
top: "conv1_0"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_0"
type: "ReLU"
bottom: "conv1_0"
top: "conv1_0"
}
layer {
name: "join_conv2_1_plus"
type: "Concat"
bottom: "conv1_0"
bottom: "conv2_1"
top: "conv2_1_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
layer {
bottom: "conv2_1_plus"
top: "conv2_2"
name: "conv2_2"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_2"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_2_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_2"
type: "Dropout"
bottom: "conv2_2"
top: "conv2_2"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv2_2"
type: "BatchNorm"
bottom: "conv2_2"
top: "conv2_2"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
bottom: "conv2_2"
top: "conv2_3"
name: "conv2_3"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_3"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_3_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_3"
type: "Dropout"
bottom: "conv2_3"
top: "conv2_3"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv2_3"
type: "BatchNorm"
bottom: "conv2_3"
top: "conv2_3"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_3"
type: "ReLU"
bottom: "conv2_3"
top: "conv2_3"
}
layer {
bottom: "conv2_1_plus"
top: "conv1_1"
name: "conv1_1"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_1"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_1_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_1"
type: "Dropout"
bottom: "conv1_1"
top: "conv1_1"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv1_1"
type: "BatchNorm"
bottom: "conv1_1"
top: "conv1_1"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
bottom: "data"
top: "conv0_0"
name: "conv0_0"
param {
lr_mult: 1
decay_mult: 1
name: "conv0_0"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv0_0_b"
}
type: "Convolution"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv0_0"
type: "Dropout"
bottom: "conv0_0"
top: "conv0_0"
dropout_param {
dropout_ratio: 0.0
}
}
layer {
name: "batch_conv0_0"
type: "BatchNorm"
bottom: "conv0_0"
top: "conv0_0"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv0_0"
type: "ReLU"
bottom: "conv0_0"
top: "conv0_0"
}
layer {
bottom: "conv0_0"
top: "pool0_0"
name: "pool0_0"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv1_1"
top: "pool1_1"
name: "pool1_1"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv2_3"
top: "pool2_3"
name: "pool2_3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "join_pool2_3_plus"
type: "Concat"
bottom: "pool0_0"
bottom: "pool1_1"
bottom: "pool2_3"
top: "pool2_3_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
# Reduction: 1, spatial size: 16
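# Block 2: 3x3 convs, num_output 128, dropout_ratio 0.1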
layer {
bottom: "pool2_3_plus"
top: "conv2_4"
name: "conv2_4"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_4"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_4_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_4"
type: "Dropout"
bottom: "conv2_4"
top: "conv2_4"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv2_4"
type: "BatchNorm"
bottom: "conv2_4"
top: "conv2_4"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_4"
type: "ReLU"
bottom: "conv2_4"
top: "conv2_4"
}
layer {
bottom: "conv2_4"
top: "conv2_5"
name: "conv2_5"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_5"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_5_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_5"
type: "Dropout"
bottom: "conv2_5"
top: "conv2_5"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv2_5"
type: "BatchNorm"
bottom: "conv2_5"
top: "conv2_5"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_5"
type: "ReLU"
bottom: "conv2_5"
top: "conv2_5"
}
layer {
bottom: "pool2_3_plus"
top: "conv1_2"
name: "conv1_2"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_2"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_2_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_2"
type: "Dropout"
bottom: "conv1_2"
top: "conv1_2"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv1_2"
type: "BatchNorm"
bottom: "conv1_2"
top: "conv1_2"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_2"
type: "ReLU"
bottom: "conv1_2"
top: "conv1_2"
}
layer {
name: "join_conv2_5_plus"
type: "Concat"
bottom: "conv1_2"
bottom: "conv2_5"
top: "conv2_5_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
layer {
bottom: "conv2_5_plus"
top: "conv2_6"
name: "conv2_6"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_6"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_6_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_6"
type: "Dropout"
bottom: "conv2_6"
top: "conv2_6"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv2_6"
type: "BatchNorm"
bottom: "conv2_6"
top: "conv2_6"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_6"
type: "ReLU"
bottom: "conv2_6"
top: "conv2_6"
}
layer {
bottom: "conv2_6"
top: "conv2_7"
name: "conv2_7"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_7"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_7_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_7"
type: "Dropout"
bottom: "conv2_7"
top: "conv2_7"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv2_7"
type: "BatchNorm"
bottom: "conv2_7"
top: "conv2_7"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_7"
type: "ReLU"
bottom: "conv2_7"
top: "conv2_7"
}
layer {
bottom: "conv2_5_plus"
top: "conv1_3"
name: "conv1_3"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_3"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_3_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_3"
type: "Dropout"
bottom: "conv1_3"
top: "conv1_3"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv1_3"
type: "BatchNorm"
bottom: "conv1_3"
top: "conv1_3"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_3"
type: "ReLU"
bottom: "conv1_3"
top: "conv1_3"
}
layer {
bottom: "pool2_3_plus"
top: "conv0_1"
name: "conv0_1"
param {
lr_mult: 1
decay_mult: 1
name: "conv0_1"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv0_1_b"
}
type: "Convolution"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv0_1"
type: "Dropout"
bottom: "conv0_1"
top: "conv0_1"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "batch_conv0_1"
type: "BatchNorm"
bottom: "conv0_1"
top: "conv0_1"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv0_1"
type: "ReLU"
bottom: "conv0_1"
top: "conv0_1"
}
layer {
bottom: "conv0_1"
top: "pool0_1"
name: "pool0_1"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv1_3"
top: "pool1_3"
name: "pool1_3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv2_7"
top: "pool2_7"
name: "pool2_7"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "join_pool2_7_plus"
type: "Concat"
bottom: "pool0_1"
bottom: "pool1_3"
bottom: "pool2_7"
top: "pool2_7_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
# Reduction: 2, spatial size: 8
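# Block 3: 3x3 convs, num_output 256, dropout_ratio 0.2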
layer {
bottom: "pool2_7_plus"
top: "conv2_8"
name: "conv2_8"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_8"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_8_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_8"
type: "Dropout"
bottom: "conv2_8"
top: "conv2_8"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv2_8"
type: "BatchNorm"
bottom: "conv2_8"
top: "conv2_8"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_8"
type: "ReLU"
bottom: "conv2_8"
top: "conv2_8"
}
layer {
bottom: "conv2_8"
top: "conv2_9"
name: "conv2_9"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_9"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_9_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_9"
type: "Dropout"
bottom: "conv2_9"
top: "conv2_9"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv2_9"
type: "BatchNorm"
bottom: "conv2_9"
top: "conv2_9"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_9"
type: "ReLU"
bottom: "conv2_9"
top: "conv2_9"
}
layer {
bottom: "pool2_7_plus"
top: "conv1_4"
name: "conv1_4"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_4"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_4_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_4"
type: "Dropout"
bottom: "conv1_4"
top: "conv1_4"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv1_4"
type: "BatchNorm"
bottom: "conv1_4"
top: "conv1_4"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_4"
type: "ReLU"
bottom: "conv1_4"
top: "conv1_4"
}
layer {
name: "join_conv2_9_plus"
type: "Concat"
bottom: "conv1_4"
bottom: "conv2_9"
top: "conv2_9_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
layer {
bottom: "conv2_9_plus"
top: "conv2_10"
name: "conv2_10"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_10"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_10_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_10"
type: "Dropout"
bottom: "conv2_10"
top: "conv2_10"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv2_10"
type: "BatchNorm"
bottom: "conv2_10"
top: "conv2_10"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_10"
type: "ReLU"
bottom: "conv2_10"
top: "conv2_10"
}
layer {
bottom: "conv2_10"
top: "conv2_11"
name: "conv2_11"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_11"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_11_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_11"
type: "Dropout"
bottom: "conv2_11"
top: "conv2_11"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv2_11"
type: "BatchNorm"
bottom: "conv2_11"
top: "conv2_11"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_11"
type: "ReLU"
bottom: "conv2_11"
top: "conv2_11"
}
layer {
bottom: "conv2_9_plus"
top: "conv1_5"
name: "conv1_5"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_5"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_5_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_5"
type: "Dropout"
bottom: "conv1_5"
top: "conv1_5"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv1_5"
type: "BatchNorm"
bottom: "conv1_5"
top: "conv1_5"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_5"
type: "ReLU"
bottom: "conv1_5"
top: "conv1_5"
}
layer {
bottom: "pool2_7_plus"
top: "conv0_2"
name: "conv0_2"
param {
lr_mult: 1
decay_mult: 1
name: "conv0_2"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv0_2_b"
}
type: "Convolution"
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv0_2"
type: "Dropout"
bottom: "conv0_2"
top: "conv0_2"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "batch_conv0_2"
type: "BatchNorm"
bottom: "conv0_2"
top: "conv0_2"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv0_2"
type: "ReLU"
bottom: "conv0_2"
top: "conv0_2"
}
layer {
bottom: "conv0_2"
top: "pool0_2"
name: "pool0_2"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv1_5"
top: "pool1_5"
name: "pool1_5"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv2_11"
top: "pool2_11"
name: "pool2_11"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "join_pool2_11_plus"
type: "Concat"
bottom: "pool0_2"
bottom: "pool1_5"
bottom: "pool2_11"
top: "pool2_11_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
# Reduction: 3, spatial size: 4
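# Block 4: 3x3 convs, num_output 512, dropout_ratio 0.3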
layer {
bottom: "pool2_11_plus"
top: "conv2_12"
name: "conv2_12"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_12"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_12_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_12"
type: "Dropout"
bottom: "conv2_12"
top: "conv2_12"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv2_12"
type: "BatchNorm"
bottom: "conv2_12"
top: "conv2_12"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_12"
type: "ReLU"
bottom: "conv2_12"
top: "conv2_12"
}
layer {
bottom: "conv2_12"
top: "conv2_13"
name: "conv2_13"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_13"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_13_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_13"
type: "Dropout"
bottom: "conv2_13"
top: "conv2_13"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv2_13"
type: "BatchNorm"
bottom: "conv2_13"
top: "conv2_13"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_13"
type: "ReLU"
bottom: "conv2_13"
top: "conv2_13"
}
layer {
bottom: "pool2_11_plus"
top: "conv1_6"
name: "conv1_6"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_6"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_6_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_6"
type: "Dropout"
bottom: "conv1_6"
top: "conv1_6"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv1_6"
type: "BatchNorm"
bottom: "conv1_6"
top: "conv1_6"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_6"
type: "ReLU"
bottom: "conv1_6"
top: "conv1_6"
}
layer {
name: "join_conv2_13_plus"
type: "Concat"
bottom: "conv1_6"
bottom: "conv2_13"
top: "conv2_13_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
layer {
bottom: "conv2_13_plus"
top: "conv2_14"
name: "conv2_14"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_14"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_14_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_14"
type: "Dropout"
bottom: "conv2_14"
top: "conv2_14"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv2_14"
type: "BatchNorm"
bottom: "conv2_14"
top: "conv2_14"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_14"
type: "ReLU"
bottom: "conv2_14"
top: "conv2_14"
}
layer {
bottom: "conv2_14"
top: "conv2_15"
name: "conv2_15"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_15"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_15_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_15"
type: "Dropout"
bottom: "conv2_15"
top: "conv2_15"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv2_15"
type: "BatchNorm"
bottom: "conv2_15"
top: "conv2_15"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_15"
type: "ReLU"
bottom: "conv2_15"
top: "conv2_15"
}
layer {
bottom: "conv2_13_plus"
top: "conv1_7"
name: "conv1_7"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_7"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_7_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_7"
type: "Dropout"
bottom: "conv1_7"
top: "conv1_7"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv1_7"
type: "BatchNorm"
bottom: "conv1_7"
top: "conv1_7"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_7"
type: "ReLU"
bottom: "conv1_7"
top: "conv1_7"
}
layer {
bottom: "pool2_11_plus"
top: "conv0_3"
name: "conv0_3"
param {
lr_mult: 1
decay_mult: 1
name: "conv0_3"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv0_3_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv0_3"
type: "Dropout"
bottom: "conv0_3"
top: "conv0_3"
dropout_param {
dropout_ratio: 0.3
}
}
layer {
name: "batch_conv0_3"
type: "BatchNorm"
bottom: "conv0_3"
top: "conv0_3"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv0_3"
type: "ReLU"
bottom: "conv0_3"
top: "conv0_3"
}
layer {
bottom: "conv0_3"
top: "pool0_3"
name: "pool0_3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv1_7"
top: "pool1_7"
name: "pool1_7"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv2_15"
top: "pool2_15"
name: "pool2_15"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "join_pool2_15_plus"
type: "Concat"
bottom: "pool0_3"
bottom: "pool1_7"
bottom: "pool2_15"
top: "pool2_15_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
# Reduction: 4, spatial size: 2
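# Block 5: 3x3 convs, num_output 512, dropout_ratio 0.4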
layer {
bottom: "pool2_15_plus"
top: "conv2_16"
name: "conv2_16"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_16"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_16_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_16"
type: "Dropout"
bottom: "conv2_16"
top: "conv2_16"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv2_16"
type: "BatchNorm"
bottom: "conv2_16"
top: "conv2_16"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_16"
type: "ReLU"
bottom: "conv2_16"
top: "conv2_16"
}
layer {
bottom: "conv2_16"
top: "conv2_17"
name: "conv2_17"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_17"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_17_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_17"
type: "Dropout"
bottom: "conv2_17"
top: "conv2_17"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv2_17"
type: "BatchNorm"
bottom: "conv2_17"
top: "conv2_17"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_17"
type: "ReLU"
bottom: "conv2_17"
top: "conv2_17"
}
layer {
bottom: "pool2_15_plus"
top: "conv1_8"
name: "conv1_8"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_8"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_8_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_8"
type: "Dropout"
bottom: "conv1_8"
top: "conv1_8"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv1_8"
type: "BatchNorm"
bottom: "conv1_8"
top: "conv1_8"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_8"
type: "ReLU"
bottom: "conv1_8"
top: "conv1_8"
}
layer {
name: "join_conv2_17_plus"
type: "Concat"
bottom: "conv1_8"
bottom: "conv2_17"
top: "conv2_17_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
layer {
bottom: "conv2_17_plus"
top: "conv2_18"
name: "conv2_18"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_18"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_18_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_18"
type: "Dropout"
bottom: "conv2_18"
top: "conv2_18"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv2_18"
type: "BatchNorm"
bottom: "conv2_18"
top: "conv2_18"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_18"
type: "ReLU"
bottom: "conv2_18"
top: "conv2_18"
}
layer {
bottom: "conv2_18"
top: "conv2_19"
name: "conv2_19"
param {
lr_mult: 1
decay_mult: 1
name: "conv2_19"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv2_19_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv2_19"
type: "Dropout"
bottom: "conv2_19"
top: "conv2_19"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv2_19"
type: "BatchNorm"
bottom: "conv2_19"
top: "conv2_19"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv2_19"
type: "ReLU"
bottom: "conv2_19"
top: "conv2_19"
}
layer {
bottom: "conv2_17_plus"
top: "conv1_9"
name: "conv1_9"
param {
lr_mult: 1
decay_mult: 1
name: "conv1_9"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv1_9_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv1_9"
type: "Dropout"
bottom: "conv1_9"
top: "conv1_9"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv1_9"
type: "BatchNorm"
bottom: "conv1_9"
top: "conv1_9"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv1_9"
type: "ReLU"
bottom: "conv1_9"
top: "conv1_9"
}
layer {
bottom: "pool2_15_plus"
top: "conv0_4"
name: "conv0_4"
param {
lr_mult: 1
decay_mult: 1
name: "conv0_4"
}
param {
lr_mult: 2
decay_mult: 0
name: "conv0_4_b"
}
type: "Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "dropout_conv0_4"
type: "Dropout"
bottom: "conv0_4"
top: "conv0_4"
dropout_param {
dropout_ratio: 0.4
}
}
layer {
name: "batch_conv0_4"
type: "BatchNorm"
bottom: "conv0_4"
top: "conv0_4"
param { lr_mult: 0 }
param { lr_mult: 0 }
param { lr_mult: 0 }
}
layer {
name: "relu_conv0_4"
type: "ReLU"
bottom: "conv0_4"
top: "conv0_4"
}
layer {
bottom: "conv0_4"
top: "pool0_4"
name: "pool0_4"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv1_9"
top: "pool1_9"
name: "pool1_9"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "conv2_19"
top: "pool2_19"
name: "pool2_19"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "join_pool2_19_plus"
type: "Concat"
bottom: "pool0_4"
bottom: "pool1_9"
bottom: "pool2_19"
top: "pool2_19_plus"
fractal_join_param {
drop_path_ratio: 0.15
}
}
# Reduction: 5, spatial size: 1
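# Classifier: InnerProduct with num_output 10 (CIFAR-10), softmax loss in TRAIN, accuracy in TEST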
layer {
name: "prediction0"
type: "InnerProduct"
bottom: "pool2_19_plus"
top: "prediction0"
param {
lr_mult: 1
decay_mult: 1
name: "prediction0"
}
param {
lr_mult: 2
decay_mult: 0
name: "prediction0_b"
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "loss0"
type: "SoftmaxWithLoss"
bottom: "prediction0"
bottom: "label"
top: "loss0"
loss_weight: 1.0
include: { phase: TRAIN }
}
layer {
name: "accuracy_loss0"
type: "Accuracy"
bottom: "prediction0"
bottom: "label"
top: "accuracy_loss0"
include: { phase: TEST }
}
@victorhcm (Author):
This prototxt is not correct as-is. FractalNet uses custom layers that aren't available in netscope yet, so I replaced them with standard layers purely for visualization, e.g. "FractalJoin" with "Concat".
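For reference, a rough sketch of what one of these joins presumably looks like with the custom layer type restored. The type name "FractalJoin" comes from the comment above and the parameter block is copied from this file, so treat the exact spelling and semantics as assumptions about the FractalNet Caffe fork, not stock Caffe:

layer {
  name: "join_conv2_1_plus"
  type: "FractalJoin"  # custom layer: joins the incoming columns (element-wise mean per the FractalNet paper) with drop-path
  bottom: "conv1_0"
  bottom: "conv2_1"
  top: "conv2_1_plus"
  fractal_join_param {
    drop_path_ratio: 0.15  # probability of dropping an incoming path during training
  }
}

Since neither stock Caffe nor netscope knows this layer type, the file above substitutes Concat so the graph still parses and renders. Note that Concat stacks channels instead of averaging them, so the substitution is only valid for drawing the topology, not for training.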
