# For visualization online
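# Deploy-style definition of a user-guided colorization network; the
# layer names and the 313-way output suggest the interactive
# colorization (ideepcolor) architecture of Zhang et al. The single
# 4-channel 256x256 input packs the grayscale L channel together with
# sparse ab color hints and a binary hint mask.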
name: "color"
input: "data"
input_dim: 1
input_dim: 4
input_dim: 256
input_dim: 256
layer {
name: "data_l_ab_mask"
type: "Input"
top: "data_l_ab_mask"
input_param { shape { dim: 1 dim: 4 dim: 256 dim: 256 } }
}
layer {
name: "Slice"
type: "Slice"
bottom: "data_l_ab_mask"
top: "data_l"
top: "data_ab_mask"
slice_param { axis: 1 slice_point: 1 }
}
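# The Slice above splits the packed input at channel 1:
#   channel 0    -> data_l       (grayscale lightness)
#   channels 1-3 -> data_ab_mask (ab hints + hint mask)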
# *****************
# ***** conv1 *****
# *****************
layer {
name: "ab_conv1_1"
type: "Convolution"
bottom: "data_ab_mask"
top: "ab_conv1_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
}
}
layer {
name: "bw_conv1_1"
type: "Convolution"
bottom: "data_l"
top: "bw_conv1_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
}
}
layer {
name: "conv1_1"
type: "Eltwise"
bottom: "bw_conv1_1"
bottom: "ab_conv1_1"
top: "conv1_1"
}
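# Eltwise defaults to SUM, so the grayscale branch and the hint branch
# are fused by adding their conv1 responses.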
layer {
name: "relu1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
name: "conv1_2"
type: "Convolution"
bottom: "conv1_1"
top: "conv1_2"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_2"
type: "ReLU"
bottom: "conv1_2"
top: "conv1_2"
}
layer {
name: "conv1_2norm"
type: "BatchNorm"
bottom: "conv1_2"
top: "conv1_2norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
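# All three BatchNorm blobs (running mean, running variance,
# moving-average factor) are frozen with lr_mult: 0; the same pattern
# repeats for every *norm layer below.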
layer {
name: "conv1_2norm_ss"
type: "Convolution"
bottom: "conv1_2norm"
top: "conv1_2norm_ss"
param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
bias_term: false
num_output: 64
kernel_size: 1
stride: 2
weight_filler { type: "constant" value: 1 }
group: 64
}
}
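# The *_ss layers stand in for pooling: a frozen 1x1 convolution with
# stride 2, group equal to num_output, no bias, and constant-1 weights
# simply keeps every second pixel per channel, halving the resolution.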
# *****************
# ***** conv2 *****
# *****************
layer {
name: "conv2_1"
type: "Convolution"
# bottom: "conv1_2"
# bottom: "conv1_2norm"
bottom: "conv1_2norm_ss"
# bottom: "pool1"
top: "conv2_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
}
}
layer {
name: "relu2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
name: "conv2_2"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
name: "conv2_2norm"
type: "BatchNorm"
bottom: "conv2_2"
top: "conv2_2norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
layer {
name: "conv2_2norm_ss"
type: "Convolution"
bottom: "conv2_2norm"
top: "conv2_2norm_ss"
param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
bias_term: false
num_output: 128
kernel_size: 1
stride: 2
weight_filler { type: "constant" value: 1 }
group: 128
}
}
# *****************
# ***** conv3 *****
# *****************
layer {
name: "conv3_1"
type: "Convolution"
# bottom: "conv2_2"
bottom: "conv2_2norm_ss"
# bottom: "pool2"
top: "conv3_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
}
}
layer {
name: "relu3_1"
type: "ReLU"
bottom: "conv3_1"
top: "conv3_1"
}
layer {
name: "conv3_2"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
}
}
layer {
name: "relu3_2"
type: "ReLU"
bottom: "conv3_2"
top: "conv3_2"
}
layer {
name: "conv3_3"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_3"
type: "ReLU"
bottom: "conv3_3"
top: "conv3_3"
}
layer {
name: "conv3_3norm"
type: "BatchNorm"
bottom: "conv3_3"
top: "conv3_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
layer {
name: "conv3_3norm_ss"
type: "Convolution"
bottom: "conv3_3norm"
top: "conv3_3norm_ss"
param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
bias_term: false
num_output: 256
kernel_size: 1
stride: 2
weight_filler { type: "constant" value: 1 }
group: 256
}
}
# *****************
# ***** conv4 *****
# *****************
layer {
name: "conv4_1"
type: "Convolution"
# bottom: "conv3_3"
# bottom: "conv3_3norm"
bottom: "conv3_3norm_ss"
# bottom: "pool3"
top: "conv4_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 1
dilation: 1
}
}
layer {
name: "relu4_1"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
layer {
name: "conv4_2"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 1
dilation: 1
}
}
layer {
name: "relu4_2"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
layer {
name: "conv4_3"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 1
dilation: 1
}
}
layer {
name: "relu4_3"
type: "ReLU"
bottom: "conv4_3"
top: "conv4_3"
}
layer {
name: "conv4_3norm"
type: "BatchNorm"
bottom: "conv4_3"
top: "conv4_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
# *****************
# ***** conv5 *****
# *****************
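# conv5 and conv6 switch to dilation: 2 (with pad: 2), growing the
# receptive field while keeping the 1/8-resolution feature map size.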
layer {
name: "conv5_1"
type: "Convolution"
# bottom: "conv4_3"
bottom: "conv4_3norm"
# bottom: "pool4"
top: "conv5_1"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 2
dilation: 2
}
}
layer {
name: "relu5_1"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
layer {
name: "conv5_2"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 2
dilation: 2
}
}
layer {
name: "relu5_2"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
layer {
name: "conv5_3"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3"
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
# param {lr_mult: 0 decay_mult: 0} # UNARY_BRANCH_PROPAGATE
convolution_param {
num_output: 512
kernel_size: 3
stride: 1
pad: 2
dilation: 2
}
}
layer {
name: "relu5_3"
type: "ReLU"
bottom: "conv5_3"
top: "conv5_3"
}
layer {
name: "conv5_3norm"
type: "BatchNorm"
bottom: "conv5_3"
top: "conv5_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
# *****************
# ***** conv6 *****
# *****************
layer {
name: "conv6_1"
type: "Convolution"
bottom: "conv5_3norm"
top: "conv6_1"
convolution_param {
num_output: 512
kernel_size: 3
pad: 2
dilation: 2
}
}
layer {
name: "relu6_1"
type: "ReLU"
bottom: "conv6_1"
top: "conv6_1"
}
layer {
name: "conv6_2"
type: "Convolution"
bottom: "conv6_1"
top: "conv6_2"
convolution_param {
num_output: 512
kernel_size: 3
pad: 2
dilation: 2
}
}
layer {
name: "relu6_2"
type: "ReLU"
bottom: "conv6_2"
top: "conv6_2"
}
layer {
name: "conv6_3"
type: "Convolution"
bottom: "conv6_2"
top: "conv6_3"
convolution_param {
num_output: 512
kernel_size: 3
pad: 2
dilation: 2
}
}
layer {
name: "relu6_3"
type: "ReLU"
bottom: "conv6_3"
top: "conv6_3"
}
layer {
name: "conv6_3norm"
type: "BatchNorm"
bottom: "conv6_3"
top: "conv6_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
# *****************
# ***** conv7 *****
# *****************
layer {
name: "conv7_1"
type: "Convolution"
bottom: "conv6_3norm"
top: "conv7_1"
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
dilation: 1
}
}
layer {
name: "relu7_1"
type: "ReLU"
bottom: "conv7_1"
top: "conv7_1"
}
layer {
name: "conv7_2"
type: "Convolution"
bottom: "conv7_1"
top: "conv7_2"
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
dilation: 1
}
}
layer {
name: "relu7_2"
type: "ReLU"
bottom: "conv7_2"
top: "conv7_2"
}
layer {
name: "conv7_3"
type: "Convolution"
bottom: "conv7_2"
top: "conv7_3"
convolution_param {
num_output: 512
kernel_size: 3
pad: 1
dilation: 1
}
}
layer {
name: "relu7_3"
type: "ReLU"
bottom: "conv7_3"
top: "conv7_3"
}
layer {
name: "conv7_3norm"
type: "BatchNorm"
bottom: "conv7_3"
top: "conv7_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
# *****************
# ***** conv8 *****
# *****************
layer {
name: "conv8_1"
type: "Deconvolution"
bottom: "conv7_3norm"
top: "conv8_1"
convolution_param {
num_output: 256
kernel_size: 4
pad: 1
dilation: 1
stride: 2
}
}
# ***** Shortcut *****
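# Skip connection: conv3_3norm sits at the same 1/4 resolution as the
# upsampled conv8_1, so it is projected by a 3x3 convolution and summed
# into the decoder via the Eltwise below.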
layer {
name: "conv3_3_short"
type: "Convolution"
bottom: "conv3_3norm"
top: "conv3_3_short"
convolution_param {
num_output: 256
kernel_size: 3
pad: 1
stride: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv8_1_comb"
type: "Eltwise"
bottom: "conv8_1"
bottom: "conv3_3_short"
top: "conv8_1_comb"
}
# ***** End Shortcut *****
layer {
name: "conv8_1_comb"
type: "ReLU"
bottom: "conv8_1_comb"
top: "conv8_1_comb"
}
layer {
name: "conv8_2"
type: "Convolution"
bottom: "conv8_1_comb"
top: "conv8_2"
convolution_param {
num_output: 256
kernel_size: 3
pad: 1
dilation: 1
}
}
layer {
name: "relu8_2"
type: "ReLU"
bottom: "conv8_2"
top: "conv8_2"
}
layer {
name: "conv8_3"
type: "Convolution"
bottom: "conv8_2"
top: "conv8_3"
convolution_param {
num_output: 256
kernel_size: 3
pad: 1
dilation: 1
}
}
layer {
name: "relu8_3"
type: "ReLU"
bottom: "conv8_3"
top: "conv8_3"
}
layer {
name: "conv8_3norm"
type: "BatchNorm"
bottom: "conv8_3"
top: "conv8_3norm"
batch_norm_param{ }
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
param {lr_mult: 0 decay_mult: 0}
}
# ****************************
# ***** Unary prediction *****
# ****************************
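# Hypercolumn-style fusion: each *norm feature map is projected to 384
# channels at 1/4 resolution (a 3x3 conv where the source is already
# 1/4 res, a 4x4 stride-2 deconv where it is 1/8 res), the six
# projections are summed and passed through a ReLU, and a 1x1 conv
# maps the result to 313 channels -- the quantized ab color bins of
# Zhang et al. propagate_down: false stops gradients from these heads
# reaching the main trunk.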
layer {
name: "conv3_pred"
type: "Convolution"
bottom: "conv3_3norm"
top: "conv3_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 3
stride: 1
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv4_pred"
type: "Deconvolution"
bottom: "conv4_3norm"
top: "conv4_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 4
stride: 2
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv5_pred"
type: "Deconvolution"
bottom: "conv5_3norm"
top: "conv5_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 4
stride: 2
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv6_pred"
type: "Deconvolution"
bottom: "conv6_3norm"
top: "conv6_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 4
stride: 2
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv7_pred"
type: "Deconvolution"
bottom: "conv7_3norm"
top: "conv7_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 4
stride: 2
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv8_pred"
type: "Convolution"
bottom: "conv8_3norm"
top: "conv8_pred"
propagate_down: false
convolution_param {
num_output: 384
kernel_size: 3
stride: 1
pad: 1
dilation: 1
weight_filler { type: "gaussian" std: .01 }
bias_filler { type: "constant" value: 1 }
}
}
layer {
name: "conv345678_pred"
type: "Eltwise"
bottom: "conv3_pred"
bottom: "conv4_pred"
bottom: "conv5_pred"
bottom: "conv6_pred"
bottom: "conv7_pred"
bottom: "conv8_pred"
top: "conv345678_pred"
}
layer {
name: "relu345678_pred"
type: "ReLU"
bottom: "conv345678_pred"
top: "conv345678_pred"
}
layer {
name: "pred_313"
type: "Convolution"
bottom: "conv345678_pred"
top: "pred_313"
convolution_param {
num_output: 313
kernel_size: 1
stride: 1
dilation: 1
}
}
layer {
name: "pred_313_us"
type: "Deconvolution"
bottom: "pred_313"
top: "pred_313_us"
param { name: "kern_us" }
convolution_param {
bias_term: false
num_output: 313
kernel_size: 4
stride: 2
pad: 1
group: 313
}
}
layer {
name: "pred_313_rs"
type: "Deconvolution"
bottom: "pred_313_us"
top: "pred_313_rs"
param { name: "kern_us" }
convolution_param {
bias_term: false
num_output: 313
kernel_size: 4
stride: 2
pad: 1
group: 313
}
}
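# Two stacked 2x deconvolutions (4x total) bring the 313-bin logits
# back to 256x256. Both layers share one kernel via
# param { name: "kern_us" }; since no filler is given, the grouped,
# bias-free weights are presumably set externally (e.g. to a bilinear
# kernel) before use.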
layer { # make less peaky
name: "scale_S"
bottom: "pred_313_rs"
type: "Scale"
top: "scale_S"
scale_param {
filler { type: "constant" value: .2 }
}
}
layer {
name: "dist_ab_S"
type: "Softmax"
bottom: "scale_S"
top: "dist_ab_S"
}
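# The Scale layers act as inverse softmax temperatures: multiplying
# the logits by 0.2 flattens dist_ab_S (temperature 5), while the 2.6
# below sharpens dist_ab_T (temperature ~0.38) for a point estimate.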
# **************************
# ***** Point Estimate *****
# **************************
layer { # make more peaky
name: "scale_T"
bottom: "pred_313_rs"
type: "Scale"
top: "scale_T"
scale_param {
filler { type: "constant" value: 2.6 }
}
}
layer {
name: "dist_ab"
type: "Softmax"
bottom: "scale_T"
top: "dist_ab_T"
}
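# 1x1 convolution collapsing the sharpened 313-bin distribution to a
# 2-channel ab prediction; presumably its weights hold the ab bin
# centers, so the output is the distribution's expected ab value.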
layer {
name: "pred_ab"
type: "Convolution"
bottom: "dist_ab_T"
top: "pred_ab"
convolution_param {
kernel_size: 1
num_output: 2
}
}
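
A minimal pycaffe sketch of how a deploy net like this might be run. The file and weight names and the zero-filled inputs are placeholders for illustration, not part of this gist:

import numpy as np
import caffe

# Hypothetical paths -- substitute your own prototxt and weights.
net = caffe.Net('color_deploy.prototxt', 'color.caffemodel', caffe.TEST)

# Pack the 4-channel input to match the Slice layer at the top of the
# net: channel 0 -> data_l, channels 1-3 -> data_ab_mask.
l = np.zeros((1, 1, 256, 256), dtype=np.float32)     # lightness channel
ab = np.zeros((1, 2, 256, 256), dtype=np.float32)    # user ab hints
mask = np.zeros((1, 1, 256, 256), dtype=np.float32)  # 1 where a hint exists

net.blobs['data_l_ab_mask'].data[...] = np.concatenate((l, ab, mask), axis=1)
net.forward()

pred_ab = net.blobs['pred_ab'].data    # (1, 2, 256, 256) ab point estimate
dist_ab = net.blobs['dist_ab_S'].data  # softened 313-bin color distribution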