Inverted residual construction with t=4 of ResNet-50
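Every unit in the prototxt below follows one pattern: a 3x3 convolution expands the input channels by t=4, a per-channel PReLU follows, and a second 3x3 convolution projects back down to the block width; downsampling blocks carry a 1x1 stride-2 convolution on the shortcut. The ConvNd_* / BatchNorm_*-plus-Scale layer naming suggests an automated PyTorch-to-Caffe export, so here is a minimal PyTorch sketch of the repeating block, reconstructed from the layer parameters below (the class and argument names are illustrative, not from the original):

import torch
import torch.nn as nn

class InvertedResidual(nn.Module):
    """One block as in the prototxt: 3x3 expand (t=4) -> PReLU -> 3x3 project,
    with an identity shortcut, or a 1x1 stride-2 shortcut when downsampling."""

    def __init__(self, c_in, c_out, stride=1, t=4):
        super().__init__()
        hidden = t * c_in  # e.g. 16 -> 64, matching num_output of the expand convs
        self.expand = nn.Sequential(
            nn.Conv2d(c_in, hidden, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(hidden),
            nn.PReLU(hidden),  # channel_shared: false -> one slope per channel
        )
        self.project = nn.Sequential(
            nn.Conv2d(hidden, c_out, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(c_out),
        )
        if stride != 1 or c_in != c_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=stride, bias=False),
                nn.BatchNorm2d(c_out),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        return self.shortcut(x) + self.project(self.expand(x))

# Shape check against the Stage 2 transition (Add_3 -> Add_4):
x = torch.randn(1, 16, 56, 56)
print(InvertedResidual(16, 32, stride=2)(x).shape)  # torch.Size([1, 32, 28, 28])

Stacking these blocks with stage widths (16, 32, 64, 128) and depths (3, 4, 14, 3) reproduces the layer graph below.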
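# Input: 1 x 3 x 112 x 112.
# Stem: 3x3 conv (3 -> 64) + BN/Scale + PReLU, then a 1x1 conv down to the first stage width (16).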
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 112
      dim: 112
    }
  }
}
layer {
  name: "ConvNd_1"
  type: "Convolution"
  bottom: "data"
  top: "ConvNd_1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_1"
  type: "BatchNorm"
  bottom: "ConvNd_1"
  top: "BatchNorm_1"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_1_scale"
  type: "Scale"
  bottom: "BatchNorm_1"
  top: "BatchNorm_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_1"
  type: "PReLU"
  bottom: "BatchNorm_1"
  top: "PReLU_1"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_2"
  type: "Convolution"
  bottom: "PReLU_1"
  top: "ConvNd_2"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_2"
  type: "BatchNorm"
  bottom: "ConvNd_2"
  top: "BatchNorm_2"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_2_scale"
  type: "Scale"
  bottom: "BatchNorm_2"
  top: "BatchNorm_2"
  scale_param {
    bias_term: true
  }
}
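# Stage 1: 3 inverted-residual blocks at width 16 (hidden width 64, t = 4); the first block downsamples with a 1x1 stride-2 shortcut.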
layer {
  name: "ConvNd_3"
  type: "Convolution"
  bottom: "BatchNorm_2"
  top: "ConvNd_3"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_3"
  type: "BatchNorm"
  bottom: "ConvNd_3"
  top: "BatchNorm_3"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_3_scale"
  type: "Scale"
  bottom: "BatchNorm_3"
  top: "BatchNorm_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ConvNd_4"
  type: "Convolution"
  bottom: "BatchNorm_2"
  top: "ConvNd_4"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_4"
  type: "BatchNorm"
  bottom: "ConvNd_4"
  top: "BatchNorm_4"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_4_scale"
  type: "Scale"
  bottom: "BatchNorm_4"
  top: "BatchNorm_4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_2"
  type: "PReLU"
  bottom: "BatchNorm_4"
  top: "PReLU_2"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_5"
  type: "Convolution"
  bottom: "PReLU_2"
  top: "ConvNd_5"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_5"
  type: "BatchNorm"
  bottom: "ConvNd_5"
  top: "BatchNorm_5"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_5_scale"
  type: "Scale"
  bottom: "BatchNorm_5"
  top: "BatchNorm_5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_1"
  type: "Eltwise"
  bottom: "BatchNorm_3"
  bottom: "BatchNorm_5"
  top: "Add_1"
}
layer {
  name: "ConvNd_6"
  type: "Convolution"
  bottom: "Add_1"
  top: "ConvNd_6"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_6"
  type: "BatchNorm"
  bottom: "ConvNd_6"
  top: "BatchNorm_6"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_6_scale"
  type: "Scale"
  bottom: "BatchNorm_6"
  top: "BatchNorm_6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_3"
  type: "PReLU"
  bottom: "BatchNorm_6"
  top: "PReLU_3"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_7"
  type: "Convolution"
  bottom: "PReLU_3"
  top: "ConvNd_7"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_7"
  type: "BatchNorm"
  bottom: "ConvNd_7"
  top: "BatchNorm_7"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_7_scale"
  type: "Scale"
  bottom: "BatchNorm_7"
  top: "BatchNorm_7"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_2"
  type: "Eltwise"
  bottom: "Add_1"
  bottom: "BatchNorm_7"
  top: "Add_2"
}
layer {
  name: "ConvNd_8"
  type: "Convolution"
  bottom: "Add_2"
  top: "ConvNd_8"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_8"
  type: "BatchNorm"
  bottom: "ConvNd_8"
  top: "BatchNorm_8"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_8_scale"
  type: "Scale"
  bottom: "BatchNorm_8"
  top: "BatchNorm_8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_4"
  type: "PReLU"
  bottom: "BatchNorm_8"
  top: "PReLU_4"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_9"
  type: "Convolution"
  bottom: "PReLU_4"
  top: "ConvNd_9"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_9"
  type: "BatchNorm"
  bottom: "ConvNd_9"
  top: "BatchNorm_9"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_9_scale"
  type: "Scale"
  bottom: "BatchNorm_9"
  top: "BatchNorm_9"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_3"
  type: "Eltwise"
  bottom: "Add_2"
  bottom: "BatchNorm_9"
  top: "Add_3"
}
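# Stage 2: 4 blocks at width 32; the first block downsamples (1x1 stride-2 shortcut).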
layer {
  name: "ConvNd_10"
  type: "Convolution"
  bottom: "Add_3"
  top: "ConvNd_10"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_10"
  type: "BatchNorm"
  bottom: "ConvNd_10"
  top: "BatchNorm_10"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_10_scale"
  type: "Scale"
  bottom: "BatchNorm_10"
  top: "BatchNorm_10"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ConvNd_11"
  type: "Convolution"
  bottom: "Add_3"
  top: "ConvNd_11"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_11"
  type: "BatchNorm"
  bottom: "ConvNd_11"
  top: "BatchNorm_11"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_11_scale"
  type: "Scale"
  bottom: "BatchNorm_11"
  top: "BatchNorm_11"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_5"
  type: "PReLU"
  bottom: "BatchNorm_11"
  top: "PReLU_5"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_12"
  type: "Convolution"
  bottom: "PReLU_5"
  top: "ConvNd_12"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_12"
  type: "BatchNorm"
  bottom: "ConvNd_12"
  top: "BatchNorm_12"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_12_scale"
  type: "Scale"
  bottom: "BatchNorm_12"
  top: "BatchNorm_12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_4"
  type: "Eltwise"
  bottom: "BatchNorm_10"
  bottom: "BatchNorm_12"
  top: "Add_4"
}
layer {
  name: "ConvNd_13"
  type: "Convolution"
  bottom: "Add_4"
  top: "ConvNd_13"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_13"
  type: "BatchNorm"
  bottom: "ConvNd_13"
  top: "BatchNorm_13"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_13_scale"
  type: "Scale"
  bottom: "BatchNorm_13"
  top: "BatchNorm_13"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_6"
  type: "PReLU"
  bottom: "BatchNorm_13"
  top: "PReLU_6"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_14"
  type: "Convolution"
  bottom: "PReLU_6"
  top: "ConvNd_14"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_14"
  type: "BatchNorm"
  bottom: "ConvNd_14"
  top: "BatchNorm_14"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_14_scale"
  type: "Scale"
  bottom: "BatchNorm_14"
  top: "BatchNorm_14"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_5"
  type: "Eltwise"
  bottom: "Add_4"
  bottom: "BatchNorm_14"
  top: "Add_5"
}
layer {
  name: "ConvNd_15"
  type: "Convolution"
  bottom: "Add_5"
  top: "ConvNd_15"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_15"
  type: "BatchNorm"
  bottom: "ConvNd_15"
  top: "BatchNorm_15"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_15_scale"
  type: "Scale"
  bottom: "BatchNorm_15"
  top: "BatchNorm_15"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_7"
  type: "PReLU"
  bottom: "BatchNorm_15"
  top: "PReLU_7"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_16"
  type: "Convolution"
  bottom: "PReLU_7"
  top: "ConvNd_16"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_16"
  type: "BatchNorm"
  bottom: "ConvNd_16"
  top: "BatchNorm_16"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_16_scale"
  type: "Scale"
  bottom: "BatchNorm_16"
  top: "BatchNorm_16"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_6"
  type: "Eltwise"
  bottom: "Add_5"
  bottom: "BatchNorm_16"
  top: "Add_6"
}
layer {
  name: "ConvNd_17"
  type: "Convolution"
  bottom: "Add_6"
  top: "ConvNd_17"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_17"
  type: "BatchNorm"
  bottom: "ConvNd_17"
  top: "BatchNorm_17"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_17_scale"
  type: "Scale"
  bottom: "BatchNorm_17"
  top: "BatchNorm_17"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_8"
  type: "PReLU"
  bottom: "BatchNorm_17"
  top: "PReLU_8"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_18"
  type: "Convolution"
  bottom: "PReLU_8"
  top: "ConvNd_18"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_18"
  type: "BatchNorm"
  bottom: "ConvNd_18"
  top: "BatchNorm_18"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_18_scale"
  type: "Scale"
  bottom: "BatchNorm_18"
  top: "BatchNorm_18"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_7"
  type: "Eltwise"
  bottom: "Add_6"
  bottom: "BatchNorm_18"
  top: "Add_7"
}
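# Stage 3: 14 blocks at width 64; the first block downsamples.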
layer {
  name: "ConvNd_19"
  type: "Convolution"
  bottom: "Add_7"
  top: "ConvNd_19"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_19"
  type: "BatchNorm"
  bottom: "ConvNd_19"
  top: "BatchNorm_19"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_19_scale"
  type: "Scale"
  bottom: "BatchNorm_19"
  top: "BatchNorm_19"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ConvNd_20"
  type: "Convolution"
  bottom: "Add_7"
  top: "ConvNd_20"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_20"
  type: "BatchNorm"
  bottom: "ConvNd_20"
  top: "BatchNorm_20"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_20_scale"
  type: "Scale"
  bottom: "BatchNorm_20"
  top: "BatchNorm_20"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_9"
  type: "PReLU"
  bottom: "BatchNorm_20"
  top: "PReLU_9"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_21"
  type: "Convolution"
  bottom: "PReLU_9"
  top: "ConvNd_21"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_21"
  type: "BatchNorm"
  bottom: "ConvNd_21"
  top: "BatchNorm_21"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_21_scale"
  type: "Scale"
  bottom: "BatchNorm_21"
  top: "BatchNorm_21"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_8"
  type: "Eltwise"
  bottom: "BatchNorm_19"
  bottom: "BatchNorm_21"
  top: "Add_8"
}
layer {
  name: "ConvNd_22"
  type: "Convolution"
  bottom: "Add_8"
  top: "ConvNd_22"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_22"
  type: "BatchNorm"
  bottom: "ConvNd_22"
  top: "BatchNorm_22"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_22_scale"
  type: "Scale"
  bottom: "BatchNorm_22"
  top: "BatchNorm_22"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_10"
  type: "PReLU"
  bottom: "BatchNorm_22"
  top: "PReLU_10"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_23"
  type: "Convolution"
  bottom: "PReLU_10"
  top: "ConvNd_23"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_23"
  type: "BatchNorm"
  bottom: "ConvNd_23"
  top: "BatchNorm_23"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_23_scale"
  type: "Scale"
  bottom: "BatchNorm_23"
  top: "BatchNorm_23"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_9"
  type: "Eltwise"
  bottom: "Add_8"
  bottom: "BatchNorm_23"
  top: "Add_9"
}
layer {
  name: "ConvNd_24"
  type: "Convolution"
  bottom: "Add_9"
  top: "ConvNd_24"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_24"
  type: "BatchNorm"
  bottom: "ConvNd_24"
  top: "BatchNorm_24"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_24_scale"
  type: "Scale"
  bottom: "BatchNorm_24"
  top: "BatchNorm_24"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_11"
  type: "PReLU"
  bottom: "BatchNorm_24"
  top: "PReLU_11"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_25"
  type: "Convolution"
  bottom: "PReLU_11"
  top: "ConvNd_25"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_25"
  type: "BatchNorm"
  bottom: "ConvNd_25"
  top: "BatchNorm_25"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_25_scale"
  type: "Scale"
  bottom: "BatchNorm_25"
  top: "BatchNorm_25"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_10"
  type: "Eltwise"
  bottom: "Add_9"
  bottom: "BatchNorm_25"
  top: "Add_10"
}
layer {
  name: "ConvNd_26"
  type: "Convolution"
  bottom: "Add_10"
  top: "ConvNd_26"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_26"
  type: "BatchNorm"
  bottom: "ConvNd_26"
  top: "BatchNorm_26"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_26_scale"
  type: "Scale"
  bottom: "BatchNorm_26"
  top: "BatchNorm_26"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_12"
  type: "PReLU"
  bottom: "BatchNorm_26"
  top: "PReLU_12"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_27"
  type: "Convolution"
  bottom: "PReLU_12"
  top: "ConvNd_27"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_27"
  type: "BatchNorm"
  bottom: "ConvNd_27"
  top: "BatchNorm_27"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_27_scale"
  type: "Scale"
  bottom: "BatchNorm_27"
  top: "BatchNorm_27"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_11"
  type: "Eltwise"
  bottom: "Add_10"
  bottom: "BatchNorm_27"
  top: "Add_11"
}
layer {
  name: "ConvNd_28"
  type: "Convolution"
  bottom: "Add_11"
  top: "ConvNd_28"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_28"
  type: "BatchNorm"
  bottom: "ConvNd_28"
  top: "BatchNorm_28"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_28_scale"
  type: "Scale"
  bottom: "BatchNorm_28"
  top: "BatchNorm_28"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_13"
  type: "PReLU"
  bottom: "BatchNorm_28"
  top: "PReLU_13"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_29"
  type: "Convolution"
  bottom: "PReLU_13"
  top: "ConvNd_29"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_29"
  type: "BatchNorm"
  bottom: "ConvNd_29"
  top: "BatchNorm_29"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_29_scale"
  type: "Scale"
  bottom: "BatchNorm_29"
  top: "BatchNorm_29"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_12"
  type: "Eltwise"
  bottom: "Add_11"
  bottom: "BatchNorm_29"
  top: "Add_12"
}
layer {
  name: "ConvNd_30"
  type: "Convolution"
  bottom: "Add_12"
  top: "ConvNd_30"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_30"
  type: "BatchNorm"
  bottom: "ConvNd_30"
  top: "BatchNorm_30"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_30_scale"
  type: "Scale"
  bottom: "BatchNorm_30"
  top: "BatchNorm_30"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_14"
  type: "PReLU"
  bottom: "BatchNorm_30"
  top: "PReLU_14"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_31"
  type: "Convolution"
  bottom: "PReLU_14"
  top: "ConvNd_31"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_31"
  type: "BatchNorm"
  bottom: "ConvNd_31"
  top: "BatchNorm_31"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_31_scale"
  type: "Scale"
  bottom: "BatchNorm_31"
  top: "BatchNorm_31"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_13"
  type: "Eltwise"
  bottom: "Add_12"
  bottom: "BatchNorm_31"
  top: "Add_13"
}
layer {
  name: "ConvNd_32"
  type: "Convolution"
  bottom: "Add_13"
  top: "ConvNd_32"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_32"
  type: "BatchNorm"
  bottom: "ConvNd_32"
  top: "BatchNorm_32"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_32_scale"
  type: "Scale"
  bottom: "BatchNorm_32"
  top: "BatchNorm_32"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_15"
  type: "PReLU"
  bottom: "BatchNorm_32"
  top: "PReLU_15"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_33"
  type: "Convolution"
  bottom: "PReLU_15"
  top: "ConvNd_33"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_33"
  type: "BatchNorm"
  bottom: "ConvNd_33"
  top: "BatchNorm_33"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_33_scale"
  type: "Scale"
  bottom: "BatchNorm_33"
  top: "BatchNorm_33"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_14"
  type: "Eltwise"
  bottom: "Add_13"
  bottom: "BatchNorm_33"
  top: "Add_14"
}
layer {
  name: "ConvNd_34"
  type: "Convolution"
  bottom: "Add_14"
  top: "ConvNd_34"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_34"
  type: "BatchNorm"
  bottom: "ConvNd_34"
  top: "BatchNorm_34"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_34_scale"
  type: "Scale"
  bottom: "BatchNorm_34"
  top: "BatchNorm_34"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_16"
  type: "PReLU"
  bottom: "BatchNorm_34"
  top: "PReLU_16"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_35"
  type: "Convolution"
  bottom: "PReLU_16"
  top: "ConvNd_35"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_35"
  type: "BatchNorm"
  bottom: "ConvNd_35"
  top: "BatchNorm_35"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_35_scale"
  type: "Scale"
  bottom: "BatchNorm_35"
  top: "BatchNorm_35"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_15"
  type: "Eltwise"
  bottom: "Add_14"
  bottom: "BatchNorm_35"
  top: "Add_15"
}
layer {
  name: "ConvNd_36"
  type: "Convolution"
  bottom: "Add_15"
  top: "ConvNd_36"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_36"
  type: "BatchNorm"
  bottom: "ConvNd_36"
  top: "BatchNorm_36"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_36_scale"
  type: "Scale"
  bottom: "BatchNorm_36"
  top: "BatchNorm_36"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_17"
  type: "PReLU"
  bottom: "BatchNorm_36"
  top: "PReLU_17"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_37"
  type: "Convolution"
  bottom: "PReLU_17"
  top: "ConvNd_37"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_37"
  type: "BatchNorm"
  bottom: "ConvNd_37"
  top: "BatchNorm_37"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_37_scale"
  type: "Scale"
  bottom: "BatchNorm_37"
  top: "BatchNorm_37"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_16"
  type: "Eltwise"
  bottom: "Add_15"
  bottom: "BatchNorm_37"
  top: "Add_16"
}
layer {
  name: "ConvNd_38"
  type: "Convolution"
  bottom: "Add_16"
  top: "ConvNd_38"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_38"
  type: "BatchNorm"
  bottom: "ConvNd_38"
  top: "BatchNorm_38"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_38_scale"
  type: "Scale"
  bottom: "BatchNorm_38"
  top: "BatchNorm_38"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_18"
  type: "PReLU"
  bottom: "BatchNorm_38"
  top: "PReLU_18"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_39"
  type: "Convolution"
  bottom: "PReLU_18"
  top: "ConvNd_39"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_39"
  type: "BatchNorm"
  bottom: "ConvNd_39"
  top: "BatchNorm_39"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_39_scale"
  type: "Scale"
  bottom: "BatchNorm_39"
  top: "BatchNorm_39"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_17"
  type: "Eltwise"
  bottom: "Add_16"
  bottom: "BatchNorm_39"
  top: "Add_17"
}
layer {
  name: "ConvNd_40"
  type: "Convolution"
  bottom: "Add_17"
  top: "ConvNd_40"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_40"
  type: "BatchNorm"
  bottom: "ConvNd_40"
  top: "BatchNorm_40"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_40_scale"
  type: "Scale"
  bottom: "BatchNorm_40"
  top: "BatchNorm_40"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_19"
  type: "PReLU"
  bottom: "BatchNorm_40"
  top: "PReLU_19"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_41"
  type: "Convolution"
  bottom: "PReLU_19"
  top: "ConvNd_41"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_41"
  type: "BatchNorm"
  bottom: "ConvNd_41"
  top: "BatchNorm_41"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_41_scale"
  type: "Scale"
  bottom: "BatchNorm_41"
  top: "BatchNorm_41"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_18"
  type: "Eltwise"
  bottom: "Add_17"
  bottom: "BatchNorm_41"
  top: "Add_18"
}
layer {
  name: "ConvNd_42"
  type: "Convolution"
  bottom: "Add_18"
  top: "ConvNd_42"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_42"
  type: "BatchNorm"
  bottom: "ConvNd_42"
  top: "BatchNorm_42"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_42_scale"
  type: "Scale"
  bottom: "BatchNorm_42"
  top: "BatchNorm_42"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_20"
  type: "PReLU"
  bottom: "BatchNorm_42"
  top: "PReLU_20"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_43"
  type: "Convolution"
  bottom: "PReLU_20"
  top: "ConvNd_43"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_43"
  type: "BatchNorm"
  bottom: "ConvNd_43"
  top: "BatchNorm_43"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_43_scale"
  type: "Scale"
  bottom: "BatchNorm_43"
  top: "BatchNorm_43"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_19"
  type: "Eltwise"
  bottom: "Add_18"
  bottom: "BatchNorm_43"
  top: "Add_19"
}
layer {
  name: "ConvNd_44"
  type: "Convolution"
  bottom: "Add_19"
  top: "ConvNd_44"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_44"
  type: "BatchNorm"
  bottom: "ConvNd_44"
  top: "BatchNorm_44"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_44_scale"
  type: "Scale"
  bottom: "BatchNorm_44"
  top: "BatchNorm_44"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_21"
  type: "PReLU"
  bottom: "BatchNorm_44"
  top: "PReLU_21"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_45"
  type: "Convolution"
  bottom: "PReLU_21"
  top: "ConvNd_45"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_45"
  type: "BatchNorm"
  bottom: "ConvNd_45"
  top: "BatchNorm_45"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_45_scale"
  type: "Scale"
  bottom: "BatchNorm_45"
  top: "BatchNorm_45"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_20"
  type: "Eltwise"
  bottom: "Add_19"
  bottom: "BatchNorm_45"
  top: "Add_20"
}
layer {
  name: "ConvNd_46"
  type: "Convolution"
  bottom: "Add_20"
  top: "ConvNd_46"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_46"
  type: "BatchNorm"
  bottom: "ConvNd_46"
  top: "BatchNorm_46"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_46_scale"
  type: "Scale"
  bottom: "BatchNorm_46"
  top: "BatchNorm_46"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_22"
  type: "PReLU"
  bottom: "BatchNorm_46"
  top: "PReLU_22"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_47"
  type: "Convolution"
  bottom: "PReLU_22"
  top: "ConvNd_47"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_47"
  type: "BatchNorm"
  bottom: "ConvNd_47"
  top: "BatchNorm_47"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_47_scale"
  type: "Scale"
  bottom: "BatchNorm_47"
  top: "BatchNorm_47"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_21"
  type: "Eltwise"
  bottom: "Add_20"
  bottom: "BatchNorm_47"
  top: "Add_21"
}
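# Stage 4: 3 blocks at width 128; the first block downsamples.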
layer {
  name: "ConvNd_48"
  type: "Convolution"
  bottom: "Add_21"
  top: "ConvNd_48"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_48"
  type: "BatchNorm"
  bottom: "ConvNd_48"
  top: "BatchNorm_48"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_48_scale"
  type: "Scale"
  bottom: "BatchNorm_48"
  top: "BatchNorm_48"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "ConvNd_49"
  type: "Convolution"
  bottom: "Add_21"
  top: "ConvNd_49"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_49"
  type: "BatchNorm"
  bottom: "ConvNd_49"
  top: "BatchNorm_49"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_49_scale"
  type: "Scale"
  bottom: "BatchNorm_49"
  top: "BatchNorm_49"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_23"
  type: "PReLU"
  bottom: "BatchNorm_49"
  top: "PReLU_23"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_50"
  type: "Convolution"
  bottom: "PReLU_23"
  top: "ConvNd_50"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "BatchNorm_50"
  type: "BatchNorm"
  bottom: "ConvNd_50"
  top: "BatchNorm_50"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_50_scale"
  type: "Scale"
  bottom: "BatchNorm_50"
  top: "BatchNorm_50"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_22"
  type: "Eltwise"
  bottom: "BatchNorm_48"
  bottom: "BatchNorm_50"
  top: "Add_22"
}
layer {
  name: "ConvNd_51"
  type: "Convolution"
  bottom: "Add_22"
  top: "ConvNd_51"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_51"
  type: "BatchNorm"
  bottom: "ConvNd_51"
  top: "BatchNorm_51"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_51_scale"
  type: "Scale"
  bottom: "BatchNorm_51"
  top: "BatchNorm_51"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_24"
  type: "PReLU"
  bottom: "BatchNorm_51"
  top: "PReLU_24"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_52"
  type: "Convolution"
  bottom: "PReLU_24"
  top: "ConvNd_52"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_52"
  type: "BatchNorm"
  bottom: "ConvNd_52"
  top: "BatchNorm_52"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_52_scale"
  type: "Scale"
  bottom: "BatchNorm_52"
  top: "BatchNorm_52"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_23"
  type: "Eltwise"
  bottom: "Add_22"
  bottom: "BatchNorm_52"
  top: "Add_23"
}
layer {
  name: "ConvNd_53"
  type: "Convolution"
  bottom: "Add_23"
  top: "ConvNd_53"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_53"
  type: "BatchNorm"
  bottom: "ConvNd_53"
  top: "BatchNorm_53"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_53_scale"
  type: "Scale"
  bottom: "BatchNorm_53"
  top: "BatchNorm_53"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_25"
  type: "PReLU"
  bottom: "BatchNorm_53"
  top: "PReLU_25"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_54"
  type: "Convolution"
  bottom: "PReLU_25"
  top: "ConvNd_54"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_54"
  type: "BatchNorm"
  bottom: "ConvNd_54"
  top: "BatchNorm_54"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_54_scale"
  type: "Scale"
  bottom: "BatchNorm_54"
  top: "BatchNorm_54"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "Add_24"
  type: "Eltwise"
  bottom: "Add_23"
  bottom: "BatchNorm_54"
  top: "Add_24"
}
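# Head: a 7x7 conv (no padding) collapses the 7x7 map to 1x1, a 1x1 conv reduces to 128 channels, and Flatten emits the 128-d embedding.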
layer {
  name: "ConvNd_55"
  type: "Convolution"
  bottom: "Add_24"
  top: "ConvNd_55"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 0
    kernel_size: 7
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_55"
  type: "BatchNorm"
  bottom: "ConvNd_55"
  top: "BatchNorm_55"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_55_scale"
  type: "Scale"
  bottom: "BatchNorm_55"
  top: "BatchNorm_55"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "PReLU_26"
  type: "PReLU"
  bottom: "BatchNorm_55"
  top: "PReLU_26"
  prelu_param {
    channel_shared: false
  }
}
layer {
  name: "ConvNd_56"
  type: "Convolution"
  bottom: "PReLU_26"
  top: "ConvNd_56"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm_56"
  type: "BatchNorm"
  bottom: "ConvNd_56"
  top: "BatchNorm_56"
  batch_norm_param {
    use_global_stats: true
    eps: 9.99999974738e-06
  }
}
layer {
  name: "BatchNorm_56_scale"
  type: "Scale"
  bottom: "BatchNorm_56"
  top: "BatchNorm_56"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "View_1"
  type: "Flatten"
  bottom: "BatchNorm_56"
  top: "View_1"
}
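A quick way to sanity-check the graph is to load it with pycaffe and run a dummy forward pass; the prototxt file name below is an assumption, and with no trained weights only the output shape is meaningful:

import numpy as np
import caffe

caffe.set_mode_cpu()
# File name is hypothetical; save the prototxt above under any name you like.
net = caffe.Net('inverted_residual_resnet50_t4.prototxt', caffe.TEST)

# Dummy 1 x 3 x 112 x 112 input, matching the data layer.
net.blobs['data'].data[...] = np.random.randn(1, 3, 112, 112).astype(np.float32)
out = net.forward()
print(out['View_1'].shape)  # expected: (1, 128), the flattened embedding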