# DPN-68 Caffe network definition (train/val prototxt).
# Provenance: gist ashutoshsatapathy1990/449ff2692892d8d4406de988d44511d7, created July 16, 2019.
# DPN-68
name: "DPN-68"
# Training input: 224x224 random crops with horizontal mirroring.
# NOTE(review): data_param has no `source`/`backend`; presumably injected by the
# training tool (DIGITS-style workflow) — confirm before standalone use.
layer {
  name: "train-data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    mirror: true
    crop_size: 224
  }
  data_param {
    batch_size: 32
  }
  include { stage: "train" }
}
# Validation input: deterministic 224x224 crops, no mirroring.
layer {
  name: "val-data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    mirror: false
    crop_size: 224
  }
  data_param {
    batch_size: 16
  }
  include { stage: "val" }
}
# Stem: 3x3/2 convolution (10 filters, the small DPN-68 stem) -> BN -> ReLU -> 3x3/2 max pool.
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 10
    kernel_size: 3
    pad: 1
    stride: 2
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# NOTE(review): scale_filler/bias_filler inside batch_norm_param require the
# NVIDIA Caffe fork; BVLC Caffe's BatchNorm does not accept them — confirm target fork.
layer {
  name: "conv1_bn"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "conv1_relu"
  type: "ReLU"
  bottom: "conv1_bn"
  top: "conv1_bn"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1_bn"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 1
  }
}
#################### dpn1 ####################
# First dual-path block. The projection shortcut (match_conv, 96 ch) is sliced
# into a 64-ch residual path and a 32-ch dense path.
layer {
  name: "dpn1_match_bn"
  type: "BatchNorm"
  bottom: "pool1"
  top: "dpn1_match_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_match_relu"
  type: "ReLU"
  bottom: "dpn1_match_bn"
  top: "dpn1_match_bn"
}
layer {
  name: "dpn1_match_conv"
  type: "Convolution"
  bottom: "dpn1_match_bn"
  top: "dpn1_match_conv"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_match_conv_Slice"
  type: "Slice"
  bottom: "dpn1_match_conv"
  top: "dpn1_match_conv_split1"
  top: "dpn1_match_conv_split2"
  # channels 0-63 -> split1 (residual path), 64-95 -> split2 (dense path)
  slice_param {
    axis: 1
    slice_point: 64
  }
}
# Main path: BN-ReLU -> 1x1 (128) -> BN-ReLU -> 3x3 grouped (128, g=32) -> BN-ReLU.
layer {
  name: "dpn1_bn"
  type: "BatchNorm"
  bottom: "pool1"
  top: "dpn1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_relu"
  type: "ReLU"
  bottom: "dpn1_bn"
  top: "dpn1_bn"
}
layer {
  name: "dpn1_conv1"
  type: "Convolution"
  bottom: "dpn1_bn"
  top: "dpn1_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn1_conv1"
  top: "dpn1_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_conv1_relu"
  type: "ReLU"
  bottom: "dpn1_conv1_bn"
  top: "dpn1_conv1_bn"
}
layer {
  name: "dpn1_conv2"
  type: "Convolution"
  bottom: "dpn1_conv1_bn"
  top: "dpn1_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn1_conv2"
  top: "dpn1_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_conv2_relu"
  type: "ReLU"
  bottom: "dpn1_conv2_bn"
  top: "dpn1_conv2_bn"
}
# Output 1x1 convs: 64 ch for the residual path, 16 ch for the dense path.
layer {
  name: "dpn1_conv3_branch1"
  type: "Convolution"
  bottom: "dpn1_conv2_bn"
  top: "dpn1_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn1_conv3_branch2"
  type: "Convolution"
  bottom: "dpn1_conv2_bn"
  top: "dpn1_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn1_elewise"
  type: "Eltwise"
  bottom: "dpn1_match_conv_split1"
  bottom: "dpn1_conv3_branch1"
  top: "dpn1_elewise"
}
layer {
  name: "dpn1_concat"
  type: "Concat"
  bottom: "dpn1_match_conv_split2"
  bottom: "dpn1_conv3_branch2"
  top: "dpn1_concat"
}
#################### dpn2 ####################
# Identity dual-path block: shortcut reuses dpn1_elewise / dpn1_concat directly.
layer {
  name: "dpn2_concat_input"
  type: "Concat"
  bottom: "dpn1_elewise"
  bottom: "dpn1_concat"
  top: "dpn2_concat_input"
}
layer {
  name: "dpn2_bn"
  type: "BatchNorm"
  bottom: "dpn2_concat_input"
  top: "dpn2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_relu"
  type: "ReLU"
  bottom: "dpn2_bn"
  top: "dpn2_bn"
}
layer {
  name: "dpn2_conv1"
  type: "Convolution"
  bottom: "dpn2_bn"
  top: "dpn2_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn2_conv1"
  top: "dpn2_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_conv1_relu"
  type: "ReLU"
  bottom: "dpn2_conv1_bn"
  top: "dpn2_conv1_bn"
}
layer {
  name: "dpn2_conv2"
  type: "Convolution"
  bottom: "dpn2_conv1_bn"
  top: "dpn2_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn2_conv2"
  top: "dpn2_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_conv2_relu"
  type: "ReLU"
  bottom: "dpn2_conv2_bn"
  top: "dpn2_conv2_bn"
}
layer {
  name: "dpn2_conv3_branch1"
  type: "Convolution"
  bottom: "dpn2_conv2_bn"
  top: "dpn2_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn2_conv3_branch2"
  type: "Convolution"
  bottom: "dpn2_conv2_bn"
  top: "dpn2_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn2_elewise"
  type: "Eltwise"
  bottom: "dpn1_elewise"
  bottom: "dpn2_conv3_branch1"
  top: "dpn2_elewise"
}
layer {
  name: "dpn2_concat"
  type: "Concat"
  bottom: "dpn1_concat"
  bottom: "dpn2_conv3_branch2"
  top: "dpn2_concat"
}
#################### dpn3 ####################
# Identity dual-path block: shortcut reuses dpn2_elewise / dpn2_concat directly.
layer {
  name: "dpn3_concat_input"
  type: "Concat"
  bottom: "dpn2_elewise"
  bottom: "dpn2_concat"
  top: "dpn3_concat_input"
}
layer {
  name: "dpn3_bn"
  type: "BatchNorm"
  bottom: "dpn3_concat_input"
  top: "dpn3_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_relu"
  type: "ReLU"
  bottom: "dpn3_bn"
  top: "dpn3_bn"
}
layer {
  name: "dpn3_conv1"
  type: "Convolution"
  bottom: "dpn3_bn"
  top: "dpn3_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn3_conv1"
  top: "dpn3_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_conv1_relu"
  type: "ReLU"
  bottom: "dpn3_conv1_bn"
  top: "dpn3_conv1_bn"
}
layer {
  name: "dpn3_conv2"
  type: "Convolution"
  bottom: "dpn3_conv1_bn"
  top: "dpn3_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn3_conv2"
  top: "dpn3_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_conv2_relu"
  type: "ReLU"
  bottom: "dpn3_conv2_bn"
  top: "dpn3_conv2_bn"
}
layer {
  name: "dpn3_conv3_branch1"
  type: "Convolution"
  bottom: "dpn3_conv2_bn"
  top: "dpn3_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn3_conv3_branch2"
  type: "Convolution"
  bottom: "dpn3_conv2_bn"
  top: "dpn3_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn3_elewise"
  type: "Eltwise"
  bottom: "dpn2_elewise"
  bottom: "dpn3_conv3_branch1"
  top: "dpn3_elewise"
}
layer {
  name: "dpn3_concat"
  type: "Concat"
  bottom: "dpn2_concat"
  bottom: "dpn3_conv3_branch2"
  top: "dpn3_concat"
}
#################### dpn4 ####################
# First block of stage 2: spatial downsampling (stride 2) with a projection
# shortcut (match_conv, 192 ch) sliced into 128 residual + 64 dense channels.
layer {
  name: "dpn4_concat_input"
  type: "Concat"
  bottom: "dpn3_elewise"
  bottom: "dpn3_concat"
  top: "dpn4_concat_input"
}
layer {
  name: "dpn4_match_bn"
  type: "BatchNorm"
  bottom: "dpn4_concat_input"
  top: "dpn4_match_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_match_relu"
  type: "ReLU"
  bottom: "dpn4_match_bn"
  top: "dpn4_match_bn"
}
layer {
  name: "dpn4_match_conv"
  type: "Convolution"
  bottom: "dpn4_match_bn"
  top: "dpn4_match_conv"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 192
    kernel_size: 1
    pad: 0
    stride: 2
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_match_conv_Slice"
  type: "Slice"
  bottom: "dpn4_match_conv"
  top: "dpn4_match_conv_split1"
  top: "dpn4_match_conv_split2"
  # channels 0-127 -> split1 (residual path), 128-191 -> split2 (dense path)
  slice_param {
    axis: 1
    slice_point: 128
  }
}
# Main path: BN-ReLU -> 1x1 (256) -> BN-ReLU -> 3x3/2 grouped (256, g=32) -> BN-ReLU.
layer {
  name: "dpn4_bn"
  type: "BatchNorm"
  bottom: "dpn4_concat_input"
  top: "dpn4_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_relu"
  type: "ReLU"
  bottom: "dpn4_bn"
  top: "dpn4_bn"
}
layer {
  name: "dpn4_conv1"
  type: "Convolution"
  bottom: "dpn4_bn"
  top: "dpn4_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn4_conv1"
  top: "dpn4_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_conv1_relu"
  type: "ReLU"
  bottom: "dpn4_conv1_bn"
  top: "dpn4_conv1_bn"
}
layer {
  name: "dpn4_conv2"
  type: "Convolution"
  bottom: "dpn4_conv1_bn"
  top: "dpn4_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    stride: 2
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn4_conv2"
  top: "dpn4_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_conv2_relu"
  type: "ReLU"
  bottom: "dpn4_conv2_bn"
  top: "dpn4_conv2_bn"
}
layer {
  name: "dpn4_conv3_branch1"
  type: "Convolution"
  bottom: "dpn4_conv2_bn"
  top: "dpn4_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn4_conv3_branch2"
  type: "Convolution"
  bottom: "dpn4_conv2_bn"
  top: "dpn4_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn4_elewise"
  type: "Eltwise"
  bottom: "dpn4_match_conv_split1"
  bottom: "dpn4_conv3_branch1"
  top: "dpn4_elewise"
}
layer {
  name: "dpn4_concat"
  type: "Concat"
  bottom: "dpn4_match_conv_split2"
  bottom: "dpn4_conv3_branch2"
  top: "dpn4_concat"
}
#################### dpn5 ####################
# Identity dual-path block: shortcut reuses dpn4_elewise / dpn4_concat directly.
layer {
  name: "dpn5_concat_input"
  type: "Concat"
  bottom: "dpn4_elewise"
  bottom: "dpn4_concat"
  top: "dpn5_concat_input"
}
layer {
  name: "dpn5_bn"
  type: "BatchNorm"
  bottom: "dpn5_concat_input"
  top: "dpn5_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_relu"
  type: "ReLU"
  bottom: "dpn5_bn"
  top: "dpn5_bn"
}
layer {
  name: "dpn5_conv1"
  type: "Convolution"
  bottom: "dpn5_bn"
  top: "dpn5_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn5_conv1"
  top: "dpn5_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_conv1_relu"
  type: "ReLU"
  bottom: "dpn5_conv1_bn"
  top: "dpn5_conv1_bn"
}
layer {
  name: "dpn5_conv2"
  type: "Convolution"
  bottom: "dpn5_conv1_bn"
  top: "dpn5_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn5_conv2"
  top: "dpn5_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_conv2_relu"
  type: "ReLU"
  bottom: "dpn5_conv2_bn"
  top: "dpn5_conv2_bn"
}
layer {
  name: "dpn5_conv3_branch1"
  type: "Convolution"
  bottom: "dpn5_conv2_bn"
  top: "dpn5_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn5_conv3_branch2"
  type: "Convolution"
  bottom: "dpn5_conv2_bn"
  top: "dpn5_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn5_elewise"
  type: "Eltwise"
  bottom: "dpn4_elewise"
  bottom: "dpn5_conv3_branch1"
  top: "dpn5_elewise"
}
layer {
  name: "dpn5_concat"
  type: "Concat"
  bottom: "dpn4_concat"
  bottom: "dpn5_conv3_branch2"
  top: "dpn5_concat"
}
#################### dpn6 ####################
# Identity dual-path block: shortcut reuses dpn5_elewise / dpn5_concat directly.
layer {
  name: "dpn6_concat_input"
  type: "Concat"
  bottom: "dpn5_elewise"
  bottom: "dpn5_concat"
  top: "dpn6_concat_input"
}
layer {
  name: "dpn6_bn"
  type: "BatchNorm"
  bottom: "dpn6_concat_input"
  top: "dpn6_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_relu"
  type: "ReLU"
  bottom: "dpn6_bn"
  top: "dpn6_bn"
}
layer {
  name: "dpn6_conv1"
  type: "Convolution"
  bottom: "dpn6_bn"
  top: "dpn6_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn6_conv1"
  top: "dpn6_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_conv1_relu"
  type: "ReLU"
  bottom: "dpn6_conv1_bn"
  top: "dpn6_conv1_bn"
}
layer {
  name: "dpn6_conv2"
  type: "Convolution"
  bottom: "dpn6_conv1_bn"
  top: "dpn6_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn6_conv2"
  top: "dpn6_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_conv2_relu"
  type: "ReLU"
  bottom: "dpn6_conv2_bn"
  top: "dpn6_conv2_bn"
}
layer {
  name: "dpn6_conv3_branch1"
  type: "Convolution"
  bottom: "dpn6_conv2_bn"
  top: "dpn6_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn6_conv3_branch2"
  type: "Convolution"
  bottom: "dpn6_conv2_bn"
  top: "dpn6_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn6_elewise"
  type: "Eltwise"
  bottom: "dpn5_elewise"
  bottom: "dpn6_conv3_branch1"
  top: "dpn6_elewise"
}
layer {
  name: "dpn6_concat"
  type: "Concat"
  bottom: "dpn5_concat"
  bottom: "dpn6_conv3_branch2"
  top: "dpn6_concat"
}
#################### dpn7 ####################
# Identity dual-path block: shortcut reuses dpn6_elewise / dpn6_concat directly.
layer {
  name: "dpn7_concat_input"
  type: "Concat"
  bottom: "dpn6_elewise"
  bottom: "dpn6_concat"
  top: "dpn7_concat_input"
}
layer {
  name: "dpn7_bn"
  type: "BatchNorm"
  bottom: "dpn7_concat_input"
  top: "dpn7_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_relu"
  type: "ReLU"
  bottom: "dpn7_bn"
  top: "dpn7_bn"
}
layer {
  name: "dpn7_conv1"
  type: "Convolution"
  bottom: "dpn7_bn"
  top: "dpn7_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn7_conv1"
  top: "dpn7_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_conv1_relu"
  type: "ReLU"
  bottom: "dpn7_conv1_bn"
  top: "dpn7_conv1_bn"
}
layer {
  name: "dpn7_conv2"
  type: "Convolution"
  bottom: "dpn7_conv1_bn"
  top: "dpn7_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn7_conv2"
  top: "dpn7_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_conv2_relu"
  type: "ReLU"
  bottom: "dpn7_conv2_bn"
  top: "dpn7_conv2_bn"
}
layer {
  name: "dpn7_conv3_branch1"
  type: "Convolution"
  bottom: "dpn7_conv2_bn"
  top: "dpn7_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn7_conv3_branch2"
  type: "Convolution"
  bottom: "dpn7_conv2_bn"
  top: "dpn7_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Eltwise with no eltwise_param defaults to SUM (residual add).
layer {
  name: "dpn7_elewise"
  type: "Eltwise"
  bottom: "dpn6_elewise"
  bottom: "dpn7_conv3_branch1"
  top: "dpn7_elewise"
}
layer {
  name: "dpn7_concat"
  type: "Concat"
  bottom: "dpn6_concat"
  bottom: "dpn7_conv3_branch2"
  top: "dpn7_concat"
}
#################### dpn8 ####################
# First block of stage 3: spatial downsampling (stride 2) with a projection
# shortcut (match_conv, 320 ch) sliced into 256 residual + 64 dense channels.
layer {
  name: "dpn8_concat_input"
  type: "Concat"
  bottom: "dpn7_elewise"
  bottom: "dpn7_concat"
  top: "dpn8_concat_input"
}
layer {
  name: "dpn8_match_bn"
  type: "BatchNorm"
  bottom: "dpn8_concat_input"
  top: "dpn8_match_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_match_relu"
  type: "ReLU"
  bottom: "dpn8_match_bn"
  top: "dpn8_match_bn"
}
layer {
  name: "dpn8_match_conv"
  type: "Convolution"
  bottom: "dpn8_match_bn"
  top: "dpn8_match_conv"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 320
    kernel_size: 1
    pad: 0
    stride: 2
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_match_conv_Slice"
  type: "Slice"
  bottom: "dpn8_match_conv"
  top: "dpn8_match_conv_split1"
  top: "dpn8_match_conv_split2"
  # channels 0-255 -> split1 (residual path), 256-319 -> split2 (dense path)
  slice_param {
    axis: 1
    slice_point: 256
  }
}
# Main path: BN-ReLU -> 1x1 (512) -> BN-ReLU -> 3x3/2 grouped (512, g=32) -> BN-ReLU.
layer {
  name: "dpn8_bn"
  type: "BatchNorm"
  bottom: "dpn8_concat_input"
  top: "dpn8_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_relu"
  type: "ReLU"
  bottom: "dpn8_bn"
  top: "dpn8_bn"
}
layer {
  name: "dpn8_conv1"
  type: "Convolution"
  bottom: "dpn8_bn"
  top: "dpn8_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn8_conv1"
  top: "dpn8_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_conv1_relu"
  type: "ReLU"
  bottom: "dpn8_conv1_bn"
  top: "dpn8_conv1_bn"
}
layer {
  name: "dpn8_conv2"
  type: "Convolution"
  bottom: "dpn8_conv1_bn"
  top: "dpn8_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 2
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn8_conv2"
  top: "dpn8_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn8_conv2_relu"
  type: "ReLU"
  bottom: "dpn8_conv2_bn"
  top: "dpn8_conv2_bn"
}
layer { | |
name: "dpn8_conv3_branch1" | |
type: "Convolution" | |
bottom: "dpn8_conv2_bn" | |
top: "dpn8_conv3_branch1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
decay_mult: 0 | |
} | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
group: 1 | |
weight_filler { | |
type: "msra" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layer { | |
name: "dpn8_conv3_branch2" | |
type: "Convolution" | |
bottom: "dpn8_conv2_bn" | |
top: "dpn8_conv3_branch2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
decay_mult: 0 | |
} | |
convolution_param { | |
num_output: 32 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
group: 1 | |
weight_filler { | |
type: "msra" | |
std: 0.01 | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
layer { | |
name: "dpn8_elewise" | |
type: "Eltwise" | |
bottom: "dpn8_match_conv_split1" | |
bottom: "dpn8_conv3_branch1" | |
top: "dpn8_elewise" | |
} | |
layer { | |
name: "dpn8_concat" | |
type: "Concat" | |
bottom: "dpn8_match_conv_split2" | |
bottom: "dpn8_conv3_branch2" | |
top: "dpn8_concat" | |
} | |
#################### dpn9 ####################
# DPN block 9 (stride 1). Input: residual path (dpn8_elewise) concatenated
# with dense path (dpn8_concat) along channels.
layer {
  name: "dpn9_concat_input"
  type: "Concat"
  bottom: "dpn8_elewise"
  bottom: "dpn8_concat"
  top: "dpn9_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn9_bn"
  type: "BatchNorm"
  bottom: "dpn9_concat_input"
  top: "dpn9_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn9_relu"
  type: "ReLU"
  bottom: "dpn9_bn"
  top: "dpn9_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn9_conv1"
  type: "Convolution"
  bottom: "dpn9_bn"
  top: "dpn9_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn9_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn9_conv1"
  top: "dpn9_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn9_conv1_relu"
  type: "ReLU"
  bottom: "dpn9_conv1_bn"
  top: "dpn9_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn9_conv2"
  type: "Convolution"
  bottom: "dpn9_conv1_bn"
  top: "dpn9_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn9_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn9_conv2"
  top: "dpn9_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn9_conv2_relu"
  type: "ReLU"
  bottom: "dpn9_conv2_bn"
  top: "dpn9_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn9_conv3_branch1"
  type: "Convolution"
  bottom: "dpn9_conv2_bn"
  top: "dpn9_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn9_conv3_branch2"
  type: "Convolution"
  bottom: "dpn9_conv2_bn"
  top: "dpn9_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn9_elewise"
  type: "Eltwise"
  bottom: "dpn8_elewise"
  bottom: "dpn9_conv3_branch1"
  top: "dpn9_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn9_concat"
  type: "Concat"
  bottom: "dpn8_concat"
  bottom: "dpn9_conv3_branch2"
  top: "dpn9_concat"
}
#################### dpn10 ####################
# DPN block 10 (stride 1). Input: residual path (dpn9_elewise) concatenated
# with dense path (dpn9_concat) along channels.
# Fix: "dpn10_conv2_relu" fields reordered to the file-wide
# name/type/bottom/top convention (proto text-format field order is not
# semantically significant, so behavior is unchanged).
layer {
  name: "dpn10_concat_input"
  type: "Concat"
  bottom: "dpn9_elewise"
  bottom: "dpn9_concat"
  top: "dpn10_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn10_bn"
  type: "BatchNorm"
  bottom: "dpn10_concat_input"
  top: "dpn10_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn10_relu"
  type: "ReLU"
  bottom: "dpn10_bn"
  top: "dpn10_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn10_conv1"
  type: "Convolution"
  bottom: "dpn10_bn"
  top: "dpn10_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn10_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn10_conv1"
  top: "dpn10_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn10_conv1_relu"
  type: "ReLU"
  bottom: "dpn10_conv1_bn"
  top: "dpn10_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn10_conv2"
  type: "Convolution"
  bottom: "dpn10_conv1_bn"
  top: "dpn10_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn10_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn10_conv2"
  top: "dpn10_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn10_conv2_relu"
  type: "ReLU"
  bottom: "dpn10_conv2_bn"
  top: "dpn10_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn10_conv3_branch1"
  type: "Convolution"
  bottom: "dpn10_conv2_bn"
  top: "dpn10_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn10_conv3_branch2"
  type: "Convolution"
  bottom: "dpn10_conv2_bn"
  top: "dpn10_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn10_elewise"
  type: "Eltwise"
  bottom: "dpn9_elewise"
  bottom: "dpn10_conv3_branch1"
  top: "dpn10_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn10_concat"
  type: "Concat"
  bottom: "dpn9_concat"
  bottom: "dpn10_conv3_branch2"
  top: "dpn10_concat"
}
#################### dpn11 ####################
# DPN block 11 (stride 1). Input: residual path (dpn10_elewise) concatenated
# with dense path (dpn10_concat) along channels.
# Fix: "dpn11_conv1" fields reordered to the file-wide name/type/bottom/top
# convention (proto text-format field order is not semantically significant,
# so behavior is unchanged).
layer {
  name: "dpn11_concat_input"
  type: "Concat"
  bottom: "dpn10_elewise"
  bottom: "dpn10_concat"
  top: "dpn11_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn11_bn"
  type: "BatchNorm"
  bottom: "dpn11_concat_input"
  top: "dpn11_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn11_relu"
  type: "ReLU"
  bottom: "dpn11_bn"
  top: "dpn11_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn11_conv1"
  type: "Convolution"
  bottom: "dpn11_bn"
  top: "dpn11_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn11_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn11_conv1"
  top: "dpn11_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn11_conv1_relu"
  type: "ReLU"
  bottom: "dpn11_conv1_bn"
  top: "dpn11_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn11_conv2"
  type: "Convolution"
  bottom: "dpn11_conv1_bn"
  top: "dpn11_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn11_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn11_conv2"
  top: "dpn11_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn11_conv2_relu"
  type: "ReLU"
  bottom: "dpn11_conv2_bn"
  top: "dpn11_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn11_conv3_branch1"
  type: "Convolution"
  bottom: "dpn11_conv2_bn"
  top: "dpn11_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn11_conv3_branch2"
  type: "Convolution"
  bottom: "dpn11_conv2_bn"
  top: "dpn11_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn11_elewise"
  type: "Eltwise"
  bottom: "dpn10_elewise"
  bottom: "dpn11_conv3_branch1"
  top: "dpn11_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn11_concat"
  type: "Concat"
  bottom: "dpn10_concat"
  bottom: "dpn11_conv3_branch2"
  top: "dpn11_concat"
}
#################### dpn12 ####################
# DPN block 12 (stride 1). Input: residual path (dpn11_elewise) concatenated
# with dense path (dpn11_concat) along channels.
layer {
  name: "dpn12_concat_input"
  type: "Concat"
  bottom: "dpn11_elewise"
  bottom: "dpn11_concat"
  top: "dpn12_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn12_bn"
  type: "BatchNorm"
  bottom: "dpn12_concat_input"
  top: "dpn12_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn12_relu"
  type: "ReLU"
  bottom: "dpn12_bn"
  top: "dpn12_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn12_conv1"
  type: "Convolution"
  bottom: "dpn12_bn"
  top: "dpn12_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn12_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn12_conv1"
  top: "dpn12_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn12_conv1_relu"
  type: "ReLU"
  bottom: "dpn12_conv1_bn"
  top: "dpn12_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn12_conv2"
  type: "Convolution"
  bottom: "dpn12_conv1_bn"
  top: "dpn12_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn12_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn12_conv2"
  top: "dpn12_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn12_conv2_relu"
  type: "ReLU"
  bottom: "dpn12_conv2_bn"
  top: "dpn12_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn12_conv3_branch1"
  type: "Convolution"
  bottom: "dpn12_conv2_bn"
  top: "dpn12_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn12_conv3_branch2"
  type: "Convolution"
  bottom: "dpn12_conv2_bn"
  top: "dpn12_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn12_elewise"
  type: "Eltwise"
  bottom: "dpn11_elewise"
  bottom: "dpn12_conv3_branch1"
  top: "dpn12_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn12_concat"
  type: "Concat"
  bottom: "dpn11_concat"
  bottom: "dpn12_conv3_branch2"
  top: "dpn12_concat"
}
#################### dpn13 ####################
# DPN block 13 (stride 1). Input: residual path (dpn12_elewise) concatenated
# with dense path (dpn12_concat) along channels.
# Fix: "dpn13_conv1_bn" fields reordered to the file-wide name/type/bottom/top
# convention (proto text-format field order is not semantically significant,
# so behavior is unchanged).
layer {
  name: "dpn13_concat_input"
  type: "Concat"
  bottom: "dpn12_elewise"
  bottom: "dpn12_concat"
  top: "dpn13_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn13_bn"
  type: "BatchNorm"
  bottom: "dpn13_concat_input"
  top: "dpn13_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn13_relu"
  type: "ReLU"
  bottom: "dpn13_bn"
  top: "dpn13_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn13_conv1"
  type: "Convolution"
  bottom: "dpn13_bn"
  top: "dpn13_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn13_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn13_conv1"
  top: "dpn13_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn13_conv1_relu"
  type: "ReLU"
  bottom: "dpn13_conv1_bn"
  top: "dpn13_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn13_conv2"
  type: "Convolution"
  bottom: "dpn13_conv1_bn"
  top: "dpn13_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn13_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn13_conv2"
  top: "dpn13_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn13_conv2_relu"
  type: "ReLU"
  bottom: "dpn13_conv2_bn"
  top: "dpn13_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn13_conv3_branch1"
  type: "Convolution"
  bottom: "dpn13_conv2_bn"
  top: "dpn13_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn13_conv3_branch2"
  type: "Convolution"
  bottom: "dpn13_conv2_bn"
  top: "dpn13_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn13_elewise"
  type: "Eltwise"
  bottom: "dpn12_elewise"
  bottom: "dpn13_conv3_branch1"
  top: "dpn13_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn13_concat"
  type: "Concat"
  bottom: "dpn12_concat"
  bottom: "dpn13_conv3_branch2"
  top: "dpn13_concat"
}
#################### dpn14 ####################
# DPN block 14 (stride 1). Input: residual path (dpn13_elewise) concatenated
# with dense path (dpn13_concat) along channels.
layer {
  name: "dpn14_concat_input"
  type: "Concat"
  bottom: "dpn13_elewise"
  bottom: "dpn13_concat"
  top: "dpn14_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn14_bn"
  type: "BatchNorm"
  bottom: "dpn14_concat_input"
  top: "dpn14_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn14_relu"
  type: "ReLU"
  bottom: "dpn14_bn"
  top: "dpn14_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn14_conv1"
  type: "Convolution"
  bottom: "dpn14_bn"
  top: "dpn14_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn14_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn14_conv1"
  top: "dpn14_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn14_conv1_relu"
  type: "ReLU"
  bottom: "dpn14_conv1_bn"
  top: "dpn14_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn14_conv2"
  type: "Convolution"
  bottom: "dpn14_conv1_bn"
  top: "dpn14_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn14_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn14_conv2"
  top: "dpn14_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn14_conv2_relu"
  type: "ReLU"
  bottom: "dpn14_conv2_bn"
  top: "dpn14_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn14_conv3_branch1"
  type: "Convolution"
  bottom: "dpn14_conv2_bn"
  top: "dpn14_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn14_conv3_branch2"
  type: "Convolution"
  bottom: "dpn14_conv2_bn"
  top: "dpn14_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn14_elewise"
  type: "Eltwise"
  bottom: "dpn13_elewise"
  bottom: "dpn14_conv3_branch1"
  top: "dpn14_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn14_concat"
  type: "Concat"
  bottom: "dpn13_concat"
  bottom: "dpn14_conv3_branch2"
  top: "dpn14_concat"
}
#################### dpn15 ####################
# DPN block 15 (stride 1). Input: residual path (dpn14_elewise) concatenated
# with dense path (dpn14_concat) along channels.
# Fix: "dpn15_conv2" fields reordered to the file-wide name/type/bottom/top
# convention (proto text-format field order is not semantically significant,
# so behavior is unchanged).
layer {
  name: "dpn15_concat_input"
  type: "Concat"
  bottom: "dpn14_elewise"
  bottom: "dpn14_concat"
  top: "dpn15_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn15_bn"
  type: "BatchNorm"
  bottom: "dpn15_concat_input"
  top: "dpn15_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn15_relu"
  type: "ReLU"
  bottom: "dpn15_bn"
  top: "dpn15_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn15_conv1"
  type: "Convolution"
  bottom: "dpn15_bn"
  top: "dpn15_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn15_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn15_conv1"
  top: "dpn15_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn15_conv1_relu"
  type: "ReLU"
  bottom: "dpn15_conv1_bn"
  top: "dpn15_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn15_conv2"
  type: "Convolution"
  bottom: "dpn15_conv1_bn"
  top: "dpn15_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn15_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn15_conv2"
  top: "dpn15_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn15_conv2_relu"
  type: "ReLU"
  bottom: "dpn15_conv2_bn"
  top: "dpn15_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn15_conv3_branch1"
  type: "Convolution"
  bottom: "dpn15_conv2_bn"
  top: "dpn15_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn15_conv3_branch2"
  type: "Convolution"
  bottom: "dpn15_conv2_bn"
  top: "dpn15_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn15_elewise"
  type: "Eltwise"
  bottom: "dpn14_elewise"
  bottom: "dpn15_conv3_branch1"
  top: "dpn15_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn15_concat"
  type: "Concat"
  bottom: "dpn14_concat"
  bottom: "dpn15_conv3_branch2"
  top: "dpn15_concat"
}
#################### dpn16 ####################
# DPN block 16 (stride 1). Input: residual path (dpn15_elewise) concatenated
# with dense path (dpn15_concat) along channels.
layer {
  name: "dpn16_concat_input"
  type: "Concat"
  bottom: "dpn15_elewise"
  bottom: "dpn15_concat"
  top: "dpn16_concat_input"
}
# Pre-activation BatchNorm + in-place ReLU.
layer {
  name: "dpn16_bn"
  type: "BatchNorm"
  bottom: "dpn16_concat_input"
  top: "dpn16_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn16_relu"
  type: "ReLU"
  bottom: "dpn16_bn"
  top: "dpn16_bn"
}
# 1x1 bottleneck conv, 512 channels.
layer {
  name: "dpn16_conv1"
  type: "Convolution"
  bottom: "dpn16_bn"
  top: "dpn16_conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn16_conv1_bn"
  type: "BatchNorm"
  bottom: "dpn16_conv1"
  top: "dpn16_conv1_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn16_conv1_relu"
  type: "ReLU"
  bottom: "dpn16_conv1_bn"
  top: "dpn16_conv1_bn"
}
# 3x3 grouped conv (32 groups), stride 1.
layer {
  name: "dpn16_conv2"
  type: "Convolution"
  bottom: "dpn16_conv1_bn"
  top: "dpn16_conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    stride: 1
    group: 32
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn16_conv2_bn"
  type: "BatchNorm"
  bottom: "dpn16_conv2"
  top: "dpn16_conv2_bn"
  param {
    lr_mult: 1
    decay_mult: 0
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  batch_norm_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dpn16_conv2_relu"
  type: "ReLU"
  bottom: "dpn16_conv2_bn"
  top: "dpn16_conv2_bn"
}
# 1x1 projection, branch1 (256 ch): residual contribution.
layer {
  name: "dpn16_conv3_branch1"
  type: "Convolution"
  bottom: "dpn16_conv2_bn"
  top: "dpn16_conv3_branch1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# 1x1 projection, branch2 (32 ch): appended to the dense path.
layer {
  name: "dpn16_conv3_branch2"
  type: "Convolution"
  bottom: "dpn16_conv2_bn"
  top: "dpn16_conv3_branch2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    stride: 1
    group: 1
    weight_filler {
      type: "msra"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# Residual path: sum with the previous block's residual output.
layer {
  name: "dpn16_elewise"
  type: "Eltwise"
  bottom: "dpn15_elewise"
  bottom: "dpn16_conv3_branch1"
  top: "dpn16_elewise"
}
# Dense path: concat grows by 32 channels per block.
layer {
  name: "dpn16_concat"
  type: "Concat"
  bottom: "dpn15_concat"
  bottom: "dpn16_conv3_branch2"
  top: "dpn16_concat"
}
#################### dpn17 ####################
# Standard (stride-1) DPN block: fuse the residual path (dpn16_elewise)
# with the dense path (dpn16_concat), then BN-ReLU -> 1x1 conv ->
# BN-ReLU -> 3x3 grouped conv -> BN-ReLU -> two 1x1 branches.
layer {
name: "dpn17_concat_input"
type: "Concat"
bottom: "dpn16_elewise"
bottom: "dpn16_concat"
top: "dpn17_concat_input"
}
# Pre-activation BN+ReLU over the fused input.
layer {
name: "dpn17_bn"
type: "BatchNorm"
bottom: "dpn17_concat_input"
top: "dpn17_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn17_relu"
type: "ReLU"
bottom: "dpn17_bn"
top: "dpn17_bn"
}
# 1x1 reduce to the 512-channel bottleneck width.
layer {
name: "dpn17_conv1"
type: "Convolution"
bottom: "dpn17_bn"
top: "dpn17_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn17_conv1_bn"
type: "BatchNorm"
bottom: "dpn17_conv1"
top: "dpn17_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn17_conv1_relu"
type: "ReLU"
bottom: "dpn17_conv1_bn"
top: "dpn17_conv1_bn"
}
# 3x3 grouped convolution (group: 32).
layer {
name: "dpn17_conv2"
type: "Convolution"
bottom: "dpn17_conv1_bn"
top: "dpn17_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
stride: 1
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn17_conv2_bn"
type: "BatchNorm"
bottom: "dpn17_conv2"
top: "dpn17_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn17_conv2_relu"
type: "ReLU"
bottom: "dpn17_conv2_bn"
top: "dpn17_conv2_bn"
}
# Residual branch (256 channels, same width as dpn16_elewise).
layer {
name: "dpn17_conv3_branch1"
type: "Convolution"
bottom: "dpn17_conv2_bn"
top: "dpn17_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (32 new channels for the dense path).
layer {
name: "dpn17_conv3_branch2"
type: "Convolution"
bottom: "dpn17_conv2_bn"
top: "dpn17_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path: element-wise sum (default Eltwise operation is SUM).
layer {
name: "dpn17_elewise"
type: "Eltwise"
bottom: "dpn16_elewise"
bottom: "dpn17_conv3_branch1"
top: "dpn17_elewise"
}
# Dense path: append the new channels.
layer {
name: "dpn17_concat"
type: "Concat"
bottom: "dpn16_concat"
bottom: "dpn17_conv3_branch2"
top: "dpn17_concat"
}
#################### dpn18 ####################
# Standard (stride-1) DPN block, identical in structure to dpn17;
# consumes dpn17's residual and dense paths.
layer {
name: "dpn18_concat_input"
type: "Concat"
bottom: "dpn17_elewise"
bottom: "dpn17_concat"
top: "dpn18_concat_input"
}
# Pre-activation BN+ReLU over the fused input.
layer {
name: "dpn18_bn"
type: "BatchNorm"
bottom: "dpn18_concat_input"
top: "dpn18_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn18_relu"
type: "ReLU"
bottom: "dpn18_bn"
top: "dpn18_bn"
}
# 1x1 reduce to the 512-channel bottleneck width.
layer {
name: "dpn18_conv1"
type: "Convolution"
bottom: "dpn18_bn"
top: "dpn18_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn18_conv1_bn"
type: "BatchNorm"
bottom: "dpn18_conv1"
top: "dpn18_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn18_conv1_relu"
type: "ReLU"
bottom: "dpn18_conv1_bn"
top: "dpn18_conv1_bn"
}
# 3x3 grouped convolution (group: 32).
layer {
name: "dpn18_conv2"
type: "Convolution"
bottom: "dpn18_conv1_bn"
top: "dpn18_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
stride: 1
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn18_conv2_bn"
type: "BatchNorm"
bottom: "dpn18_conv2"
top: "dpn18_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn18_conv2_relu"
type: "ReLU"
bottom: "dpn18_conv2_bn"
top: "dpn18_conv2_bn"
}
# Residual branch (256 channels).
layer {
name: "dpn18_conv3_branch1"
type: "Convolution"
bottom: "dpn18_conv2_bn"
top: "dpn18_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (32 new channels).
layer {
name: "dpn18_conv3_branch2"
type: "Convolution"
bottom: "dpn18_conv2_bn"
top: "dpn18_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path: element-wise sum (default SUM).
layer {
name: "dpn18_elewise"
type: "Eltwise"
bottom: "dpn17_elewise"
bottom: "dpn18_conv3_branch1"
top: "dpn18_elewise"
}
# Dense path: append the new channels.
layer {
name: "dpn18_concat"
type: "Concat"
bottom: "dpn17_concat"
bottom: "dpn18_conv3_branch2"
top: "dpn18_concat"
}
#################### dpn19 ####################
# Standard (stride-1) DPN block, identical in structure to dpn17/dpn18;
# last block of this stage before the dpn20 downsampling transition.
layer {
name: "dpn19_concat_input"
type: "Concat"
bottom: "dpn18_elewise"
bottom: "dpn18_concat"
top: "dpn19_concat_input"
}
# Pre-activation BN+ReLU over the fused input.
layer {
name: "dpn19_bn"
type: "BatchNorm"
bottom: "dpn19_concat_input"
top: "dpn19_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn19_relu"
type: "ReLU"
bottom: "dpn19_bn"
top: "dpn19_bn"
}
# 1x1 reduce to the 512-channel bottleneck width.
layer {
name: "dpn19_conv1"
type: "Convolution"
bottom: "dpn19_bn"
top: "dpn19_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn19_conv1_bn"
type: "BatchNorm"
bottom: "dpn19_conv1"
top: "dpn19_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn19_conv1_relu"
type: "ReLU"
bottom: "dpn19_conv1_bn"
top: "dpn19_conv1_bn"
}
# 3x3 grouped convolution (group: 32).
layer {
name: "dpn19_conv2"
type: "Convolution"
bottom: "dpn19_conv1_bn"
top: "dpn19_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
stride: 1
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn19_conv2_bn"
type: "BatchNorm"
bottom: "dpn19_conv2"
top: "dpn19_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn19_conv2_relu"
type: "ReLU"
bottom: "dpn19_conv2_bn"
top: "dpn19_conv2_bn"
}
# Residual branch (256 channels).
layer {
name: "dpn19_conv3_branch1"
type: "Convolution"
bottom: "dpn19_conv2_bn"
top: "dpn19_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (32 new channels).
layer {
name: "dpn19_conv3_branch2"
type: "Convolution"
bottom: "dpn19_conv2_bn"
top: "dpn19_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path: element-wise sum (default SUM).
layer {
name: "dpn19_elewise"
type: "Eltwise"
bottom: "dpn18_elewise"
bottom: "dpn19_conv3_branch1"
top: "dpn19_elewise"
}
# Dense path: append the new channels.
layer {
name: "dpn19_concat"
type: "Concat"
bottom: "dpn18_concat"
bottom: "dpn19_conv3_branch2"
top: "dpn19_concat"
}
#################### dpn20 ####################
# Downsampling transition block (stride 2): widths double
# (bottleneck 1024, residual 512, dense increment 64). A projection
# ("match") 1x1/stride-2 conv produces 640 channels that are sliced into
# a 512-channel residual shortcut and a 128-channel dense-path seed.
layer {
name: "dpn20_concat_input"
type: "Concat"
bottom: "dpn19_elewise"
bottom: "dpn19_concat"
top: "dpn20_concat_input"
}
# BN+ReLU feeding the projection shortcut.
layer {
name: "dpn20_match_bn"
type: "BatchNorm"
bottom: "dpn20_concat_input"
top: "dpn20_match_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_match_relu"
type: "ReLU"
bottom: "dpn20_match_bn"
top: "dpn20_match_bn"
}
# Projection shortcut: 1x1, stride 2 (640 = 512 residual + 128 dense).
layer {
name: "dpn20_match_conv"
type: "Convolution"
bottom: "dpn20_match_bn"
top: "dpn20_match_conv"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 640
kernel_size: 1
pad: 0
stride: 2
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Split the projection along channels: split1 = residual shortcut,
# split2 = dense-path seed.
layer {
name: "dpn20_match_conv_Slice"
type: "Slice"
bottom: "dpn20_match_conv"
top: "dpn20_match_conv_split1"
top: "dpn20_match_conv_split2" # channels 0-511 / 512-639
slice_param {
axis: 1
slice_point: 512
}
}
# Pre-activation BN+ReLU for the main (bottleneck) path.
layer {
name: "dpn20_bn"
type: "BatchNorm"
bottom: "dpn20_concat_input"
top: "dpn20_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_relu"
type: "ReLU"
bottom: "dpn20_bn"
top: "dpn20_bn"
}
# 1x1 reduce to the widened 1024-channel bottleneck.
layer {
name: "dpn20_conv1"
type: "Convolution"
bottom: "dpn20_bn"
top: "dpn20_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_conv1_bn"
type: "BatchNorm"
bottom: "dpn20_conv1"
top: "dpn20_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_conv1_relu"
type: "ReLU"
bottom: "dpn20_conv1_bn"
top: "dpn20_conv1_bn"
}
# 3x3 grouped convolution, stride 2 here to downsample the main path
# (same output size as the stride-2 match conv).
layer {
name: "dpn20_conv2"
type: "Convolution"
bottom: "dpn20_conv1_bn"
top: "dpn20_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 3
pad: 1
stride: 2
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_conv2_bn"
type: "BatchNorm"
bottom: "dpn20_conv2"
top: "dpn20_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn20_conv2_relu"
type: "ReLU"
bottom: "dpn20_conv2_bn"
top: "dpn20_conv2_bn"
}
# Residual branch (512 channels, matches match_conv_split1).
layer {
name: "dpn20_conv3_branch1"
type: "Convolution"
bottom: "dpn20_conv2_bn"
top: "dpn20_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (64 new channels per block in this stage).
layer {
name: "dpn20_conv3_branch2"
type: "Convolution"
bottom: "dpn20_conv2_bn"
top: "dpn20_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path restart: projection shortcut + branch1 (default SUM).
layer {
name: "dpn20_elewise"
type: "Eltwise"
bottom: "dpn20_match_conv_split1"
bottom: "dpn20_conv3_branch1"
top: "dpn20_elewise"
}
# Dense path restart: projection seed (128 ch) + branch2 (64 ch).
layer {
name: "dpn20_concat"
type: "Concat"
bottom: "dpn20_match_conv_split2"
bottom: "dpn20_conv3_branch2"
top: "dpn20_concat"
}
#################### dpn21 ####################
# Standard (stride-1) DPN block at the widened stage widths
# (bottleneck 1024, residual 512, dense increment 64).
layer {
name: "dpn21_concat_input"
type: "Concat"
bottom: "dpn20_elewise"
bottom: "dpn20_concat"
top: "dpn21_concat_input"
}
# Pre-activation BN+ReLU over the fused input.
layer {
name: "dpn21_bn"
type: "BatchNorm"
bottom: "dpn21_concat_input"
top: "dpn21_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn21_relu"
type: "ReLU"
bottom: "dpn21_bn"
top: "dpn21_bn"
}
# 1x1 reduce to the 1024-channel bottleneck width.
layer {
name: "dpn21_conv1"
type: "Convolution"
bottom: "dpn21_bn"
top: "dpn21_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn21_conv1_bn"
type: "BatchNorm"
bottom: "dpn21_conv1"
top: "dpn21_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn21_conv1_relu"
type: "ReLU"
bottom: "dpn21_conv1_bn"
top: "dpn21_conv1_bn"
}
# 3x3 grouped convolution (group: 32).
layer {
name: "dpn21_conv2"
type: "Convolution"
bottom: "dpn21_conv1_bn"
top: "dpn21_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 3
pad: 1
stride: 1
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn21_conv2_bn"
type: "BatchNorm"
bottom: "dpn21_conv2"
top: "dpn21_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn21_conv2_relu"
type: "ReLU"
bottom: "dpn21_conv2_bn"
top: "dpn21_conv2_bn"
}
# Residual branch (512 channels).
layer {
name: "dpn21_conv3_branch1"
type: "Convolution"
bottom: "dpn21_conv2_bn"
top: "dpn21_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (64 new channels).
layer {
name: "dpn21_conv3_branch2"
type: "Convolution"
bottom: "dpn21_conv2_bn"
top: "dpn21_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path: element-wise sum (default SUM).
layer {
name: "dpn21_elewise"
type: "Eltwise"
bottom: "dpn20_elewise"
bottom: "dpn21_conv3_branch1"
top: "dpn21_elewise"
}
# Dense path: append the new channels.
layer {
name: "dpn21_concat"
type: "Concat"
bottom: "dpn20_concat"
bottom: "dpn21_conv3_branch2"
top: "dpn21_concat"
}
#################### dpn22 ####################
# Final (stride-1) DPN block of the network, same structure as dpn21;
# its outputs feed the global-pooling classifier head.
layer {
name: "dpn22_concat_input"
type: "Concat"
bottom: "dpn21_elewise"
bottom: "dpn21_concat"
top: "dpn22_concat_input"
}
# Pre-activation BN+ReLU over the fused input.
layer {
name: "dpn22_bn"
type: "BatchNorm"
bottom: "dpn22_concat_input"
top: "dpn22_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn22_relu"
type: "ReLU"
bottom: "dpn22_bn"
top: "dpn22_bn"
}
# 1x1 reduce to the 1024-channel bottleneck width.
layer {
name: "dpn22_conv1"
type: "Convolution"
bottom: "dpn22_bn"
top: "dpn22_conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn22_conv1_bn"
type: "BatchNorm"
bottom: "dpn22_conv1"
top: "dpn22_conv1_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn22_conv1_relu"
type: "ReLU"
bottom: "dpn22_conv1_bn"
top: "dpn22_conv1_bn"
}
# 3x3 grouped convolution (group: 32).
layer {
name: "dpn22_conv2"
type: "Convolution"
bottom: "dpn22_conv1_bn"
top: "dpn22_conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
kernel_size: 3
pad: 1
stride: 1
group: 32
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn22_conv2_bn"
type: "BatchNorm"
bottom: "dpn22_conv2"
top: "dpn22_conv2_bn"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "dpn22_conv2_relu"
type: "ReLU"
bottom: "dpn22_conv2_bn"
top: "dpn22_conv2_bn"
}
# Residual branch (512 channels).
layer {
name: "dpn22_conv3_branch1"
type: "Convolution"
bottom: "dpn22_conv2_bn"
top: "dpn22_conv3_branch1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Dense branch (64 new channels).
layer {
name: "dpn22_conv3_branch2"
type: "Convolution"
bottom: "dpn22_conv2_bn"
top: "dpn22_conv3_branch2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
kernel_size: 1
pad: 0
stride: 1
group: 1
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Residual path: element-wise sum (default SUM).
layer {
name: "dpn22_elewise"
type: "Eltwise"
bottom: "dpn21_elewise"
bottom: "dpn22_conv3_branch1"
top: "dpn22_elewise"
}
# Dense path: append the new channels.
layer {
name: "dpn22_concat"
type: "Concat"
bottom: "dpn21_concat"
bottom: "dpn22_conv3_branch2"
top: "dpn22_concat"
}
#################### pool_ave ####################
# Classifier head: fuse the final residual and dense paths, BN+ReLU,
# global average pooling, flatten, fully-connected logits, and the
# stage-gated accuracy / loss / softmax outputs.
layer {
name: "pool_ave_concat_input"
type: "Concat"
bottom: "dpn22_elewise"
bottom: "dpn22_concat"
top: "pool_ave_concat_input"
}
layer {
name: "pool_ave_concat_bn"
bottom: "pool_ave_concat_input"
top: "pool_ave_concat_bn"
type: "BatchNorm"
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
batch_norm_param {
scale_filler {
type: "constant"
value: 1
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "pool_ave_concat_relu"
type: "ReLU"
bottom: "pool_ave_concat_bn"
top: "pool_ave_concat_bn"
}
# Global average pooling collapses each feature map to a single value.
layer {
name: "pool_ave"
type: "Pooling"
bottom: "pool_ave_concat_bn"
top: "pool_ave"
pooling_param {
global_pooling : true
pool: AVE
}
}
layer {
name: "pool_ave_flat"
type: "Flatten"
bottom: "pool_ave"
top: "pool_ave_flat"
flatten_param {
axis: 1
}
}
# Final classifier. NOTE(review): num_output is intentionally omitted --
# this appears to be a DIGITS template, where DIGITS fills in the class
# count from the dataset; under plain Caffe this layer would need an
# explicit num_output. TODO confirm against the training environment.
layer {
name: "fclayer"
type: "InnerProduct"
bottom: "pool_ave_flat"
top: "fclayer"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
weight_filler {
type: "msra"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Top-1 accuracy, evaluated only during the validation stage.
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fclayer"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
# Softmax cross-entropy loss for train/val; excluded from deploy.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fclayer"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
# Plain softmax probabilities, emitted only in the deploy stage.
layer {
name: "softmax"
type: "Softmax"
bottom: "fclayer"
top: "softmax"
include { stage: "deploy" }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment