Skip to content

Instantly share code, notes, and snippets.

@ducha-aiki
Created October 22, 2015 09:05
Show Gist options
  • Star 5 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ducha-aiki/c0d1325f0cebe0b05c36 to your computer and use it in GitHub Desktop.
Examples of how to use batch_norm in caffe
# Solver for the baseline (no batch norm) sigmoid network.
# The train/test net protocol buffer definition
net: "examples/cifar10/cifar10_full_sigmoid_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy: with "multistep", the learning rate is multiplied
# by gamma at each iteration listed in a stepvalue entry below.
lr_policy: "multistep"
gamma: 0.1
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 40000
# Drop the learning rate (x gamma) at iterations 30000 and 35000.
stepvalue: 30000
stepvalue: 35000
# snapshot intermediate results
# NOTE(review): no `snapshot:` interval is set, so presumably only a final
# snapshot is written at the end of training — confirm this is intended.
snapshot_prefix: "examples/cifar10_full_sigmoid"
# solver mode: CPU or GPU
solver_mode: GPU
# Solver for the batch-norm variant of the sigmoid network.
# The train/test net protocol buffer definition
net: "examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Skip the test pass that would otherwise run before the first training
# iteration (the BN running statistics are not meaningful until trained).
test_initialization: false
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy: with "multistep", the learning rate is multiplied
# by gamma at each iteration listed in a stepvalue entry below.
lr_policy: "multistep"
gamma: 0.1
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 40000
# Drop the learning rate (x gamma) at iterations 30000 and 35000.
stepvalue: 30000
stepvalue: 35000
# snapshot intermediate results
# NOTE(review): no `snapshot:` interval is set, so presumably only a final
# snapshot is written at the end of training — confirm this is intended.
snapshot_prefix: "examples/cifar10_full_sigmoid_bn"
# solver mode: CPU or GPU
solver_mode: GPU
# Baseline CIFAR-10 network: three conv/pool stages with sigmoid
# nonlinearities and NO batch normalization. Serves as the comparison
# point for cifar10_full_sigmoid_train_test_bn.prototxt.
name: "CIFAR10_full"
# Training input: CIFAR-10 train LMDB, mean-subtracted, batches of 100.
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
# Test input: CIFAR-10 test LMDB, same preprocessing.
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
}
# conv1: 32 filters of 5x5, pad 2, stride 1.
# First param block is the weights (lr x1, decayed); second is the bias
# (lr x2, no weight decay).
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
# pool1: 3x3 max pooling, stride 2.
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# In-place sigmoid nonlinearity after pool1.
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "pool1"
top: "pool1"
}
# conv2: 32 filters of 5x5, pad 2, stride 1.
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
# In-place sigmoid nonlinearity after conv2.
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
# pool2: 3x3 average pooling, stride 2.
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
# conv3: 64 filters of 5x5, pad 2, stride 1.
# (The param blocks come after convolution_param here, unlike conv1/conv2;
# field order within a message is irrelevant in protobuf text format.)
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
}
# In-place sigmoid nonlinearity after conv3.
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
# pool3: 3x3 average pooling, stride 2.
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
# ip1: fully connected layer producing the 10 class scores.
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
# Classification accuracy, reported only during the TEST phase.
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
# Softmax + multinomial logistic loss over the 10 class scores.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
# CIFAR-10 sigmoid network WITH batch normalization: a BatchNorm layer is
# inserted between each conv/pool output and its sigmoid nonlinearity.
# Each BatchNorm layer is written twice with the SAME name — one copy
# included only in TRAIN, one only in TEST — so that the test net, which
# Caffe matches to the training net by layer name, reuses the statistics
# accumulated during training.
name: "CIFAR10_full"
# Training input: CIFAR-10 train LMDB, mean-subtracted, batches of 100.
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
# Test input: CIFAR-10 test LMDB, same preprocessing.
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
}
# conv1: 32 filters of 5x5, pad 2, stride 1.
# The bias param has lr_mult: 0 (frozen) — presumably because the following
# BatchNorm subtracts the mean, making the conv bias redundant. TODO confirm.
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
# pool1: 3x3 max pooling, stride 2.
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
# bn1 (TRAIN copy): normalize with mini-batch statistics.
# The three lr_mult: 0 params freeze BatchNorm's internal blobs (running
# mean, running variance, moving-average scale factor) against solver
# updates — the layer maintains them itself.
# NOTE(review): bn1 writes to a new top "bn1" while bn2/bn3 below normalize
# in place; both work, but the asymmetry looks unintentional.
layer {
name: "bn1"
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}
# bn1 (TEST copy): normalize with the accumulated global statistics.
layer {
name: "bn1"
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}
# In-place sigmoid nonlinearity after bn1.
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "bn1"
top: "bn1"
}
# conv2: 32 filters of 5x5, pad 2, stride 1; bias frozen (see conv1 note).
layer {
name: "conv2"
type: "Convolution"
bottom: "bn1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
# bn2 (TRAIN copy): in-place batch norm on conv2, mini-batch statistics.
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}
# bn2 (TEST copy): in-place batch norm on conv2, global statistics.
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}
# In-place sigmoid nonlinearity after bn2.
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
# pool2: 3x3 average pooling, stride 2.
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
# conv3: 64 filters of 5x5, pad 2, stride 1; bias frozen (see conv1 note).
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
}
# bn3 (TRAIN copy): in-place batch norm on conv3, mini-batch statistics.
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
batch_norm_param {
use_global_stats: false
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
}
# bn3 (TEST copy): in-place batch norm on conv3, global statistics.
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
batch_norm_param {
use_global_stats: true
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
}
# In-place sigmoid nonlinearity after bn3.
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
# pool3: 3x3 average pooling, stride 2.
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
# ip1: fully connected layer producing the 10 class scores.
# (Bias is trainable here — lr_mult: 2 — since no BatchNorm follows.)
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
# Classification accuracy, reported only during the TEST phase.
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
# Softmax + multinomial logistic loss over the 10 class scores.
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
I1022 10:46:51.019372 8536 caffe.cpp:184] Using GPUs 0
I1022 10:46:51.137827 8536 solver.cpp:47] Initializing solver from parameters:
test_iter: 100
test_interval: 1000
base_lr: 0.001
display: 100
max_iter: 40000
lr_policy: "multistep"
gamma: 0.1
momentum: 0.9
weight_decay: 0.004
snapshot_prefix: "examples/cifar10_full_sigmoid"
solver_mode: GPU
device_id: 0
net: "examples/cifar10/cifar10_full_sigmoid_train_test.prototxt"
stepvalue: 30000
stepvalue: 35000
I1022 10:46:51.137861 8536 solver.cpp:90] Creating training net from net file: examples/cifar10/cifar10_full_sigmoid_train_test.prototxt
I1022 10:46:51.138185 8536 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer cifar
I1022 10:46:51.138197 8536 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I1022 10:46:51.138308 8536 net.cpp:49] Initializing net from parameters:
name: "CIFAR10_full"
state {
phase: TRAIN
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "pool1"
top: "pool1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
I1022 10:46:51.138360 8536 layer_factory.hpp:76] Creating layer cifar
I1022 10:46:51.138814 8536 net.cpp:106] Creating Layer cifar
I1022 10:46:51.138836 8536 net.cpp:411] cifar -> data
I1022 10:46:51.138877 8536 net.cpp:411] cifar -> label
I1022 10:46:51.138906 8536 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto
I1022 10:46:51.139557 8538 db_lmdb.cpp:38] Opened lmdb examples/cifar10/cifar10_train_lmdb
I1022 10:46:51.151831 8536 data_layer.cpp:45] output data size: 100,3,32,32
I1022 10:46:51.154021 8536 net.cpp:150] Setting up cifar
I1022 10:46:51.154062 8536 net.cpp:157] Top shape: 100 3 32 32 (307200)
I1022 10:46:51.154109 8536 net.cpp:157] Top shape: 100 (100)
I1022 10:46:51.154119 8536 net.cpp:165] Memory required for data: 1229200
I1022 10:46:51.154144 8536 layer_factory.hpp:76] Creating layer conv1
I1022 10:46:51.154168 8536 net.cpp:106] Creating Layer conv1
I1022 10:46:51.154193 8536 net.cpp:454] conv1 <- data
I1022 10:46:51.154211 8536 net.cpp:411] conv1 -> conv1
I1022 10:46:51.154752 8536 net.cpp:150] Setting up conv1
I1022 10:46:51.154758 8536 net.cpp:157] Top shape: 100 32 32 32 (3276800)
I1022 10:46:51.154773 8536 net.cpp:165] Memory required for data: 14336400
I1022 10:46:51.154786 8536 layer_factory.hpp:76] Creating layer pool1
I1022 10:46:51.154817 8536 net.cpp:106] Creating Layer pool1
I1022 10:46:51.154821 8536 net.cpp:454] pool1 <- conv1
I1022 10:46:51.154825 8536 net.cpp:411] pool1 -> pool1
I1022 10:46:51.155061 8536 net.cpp:150] Setting up pool1
I1022 10:46:51.155066 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.155068 8536 net.cpp:165] Memory required for data: 17613200
I1022 10:46:51.155083 8536 layer_factory.hpp:76] Creating layer Sigmoid1
I1022 10:46:51.155086 8536 net.cpp:106] Creating Layer Sigmoid1
I1022 10:46:51.155088 8536 net.cpp:454] Sigmoid1 <- pool1
I1022 10:46:51.155092 8536 net.cpp:397] Sigmoid1 -> pool1 (in-place)
I1022 10:46:51.155097 8536 net.cpp:150] Setting up Sigmoid1
I1022 10:46:51.155099 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.155110 8536 net.cpp:165] Memory required for data: 20890000
I1022 10:46:51.155112 8536 layer_factory.hpp:76] Creating layer conv2
I1022 10:46:51.155118 8536 net.cpp:106] Creating Layer conv2
I1022 10:46:51.155122 8536 net.cpp:454] conv2 <- pool1
I1022 10:46:51.155125 8536 net.cpp:411] conv2 -> conv2
I1022 10:46:51.156155 8536 net.cpp:150] Setting up conv2
I1022 10:46:51.156163 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.156165 8536 net.cpp:165] Memory required for data: 24166800
I1022 10:46:51.156182 8536 layer_factory.hpp:76] Creating layer Sigmoid2
I1022 10:46:51.156185 8536 net.cpp:106] Creating Layer Sigmoid2
I1022 10:46:51.156189 8536 net.cpp:454] Sigmoid2 <- conv2
I1022 10:46:51.156193 8536 net.cpp:397] Sigmoid2 -> conv2 (in-place)
I1022 10:46:51.156196 8536 net.cpp:150] Setting up Sigmoid2
I1022 10:46:51.156199 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.156201 8536 net.cpp:165] Memory required for data: 27443600
I1022 10:46:51.156203 8536 layer_factory.hpp:76] Creating layer pool2
I1022 10:46:51.156206 8536 net.cpp:106] Creating Layer pool2
I1022 10:46:51.156208 8536 net.cpp:454] pool2 <- conv2
I1022 10:46:51.156211 8536 net.cpp:411] pool2 -> pool2
I1022 10:46:51.156224 8536 net.cpp:150] Setting up pool2
I1022 10:46:51.156229 8536 net.cpp:157] Top shape: 100 32 8 8 (204800)
I1022 10:46:51.156229 8536 net.cpp:165] Memory required for data: 28262800
I1022 10:46:51.156231 8536 layer_factory.hpp:76] Creating layer conv3
I1022 10:46:51.156237 8536 net.cpp:106] Creating Layer conv3
I1022 10:46:51.156240 8536 net.cpp:454] conv3 <- pool2
I1022 10:46:51.156244 8536 net.cpp:411] conv3 -> conv3
I1022 10:46:51.157595 8536 net.cpp:150] Setting up conv3
I1022 10:46:51.157603 8536 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:46:51.157604 8536 net.cpp:165] Memory required for data: 29901200
I1022 10:46:51.157610 8536 layer_factory.hpp:76] Creating layer Sigmoid3
I1022 10:46:51.157616 8536 net.cpp:106] Creating Layer Sigmoid3
I1022 10:46:51.157634 8536 net.cpp:454] Sigmoid3 <- conv3
I1022 10:46:51.157649 8536 net.cpp:397] Sigmoid3 -> conv3 (in-place)
I1022 10:46:51.157660 8536 net.cpp:150] Setting up Sigmoid3
I1022 10:46:51.157665 8536 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:46:51.157666 8536 net.cpp:165] Memory required for data: 31539600
I1022 10:46:51.157668 8536 layer_factory.hpp:76] Creating layer pool3
I1022 10:46:51.157673 8536 net.cpp:106] Creating Layer pool3
I1022 10:46:51.157675 8536 net.cpp:454] pool3 <- conv3
I1022 10:46:51.157678 8536 net.cpp:411] pool3 -> pool3
I1022 10:46:51.157698 8536 net.cpp:150] Setting up pool3
I1022 10:46:51.157712 8536 net.cpp:157] Top shape: 100 64 4 4 (102400)
I1022 10:46:51.157716 8536 net.cpp:165] Memory required for data: 31949200
I1022 10:46:51.157716 8536 layer_factory.hpp:76] Creating layer ip1
I1022 10:46:51.157724 8536 net.cpp:106] Creating Layer ip1
I1022 10:46:51.157727 8536 net.cpp:454] ip1 <- pool3
I1022 10:46:51.157730 8536 net.cpp:411] ip1 -> ip1
I1022 10:46:51.158514 8536 net.cpp:150] Setting up ip1
I1022 10:46:51.158522 8536 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:46:51.158524 8536 net.cpp:165] Memory required for data: 31953200
I1022 10:46:51.158529 8536 layer_factory.hpp:76] Creating layer loss
I1022 10:46:51.158535 8536 net.cpp:106] Creating Layer loss
I1022 10:46:51.158537 8536 net.cpp:454] loss <- ip1
I1022 10:46:51.158540 8536 net.cpp:454] loss <- label
I1022 10:46:51.158545 8536 net.cpp:411] loss -> loss
I1022 10:46:51.158552 8536 layer_factory.hpp:76] Creating layer loss
I1022 10:46:51.158612 8536 net.cpp:150] Setting up loss
I1022 10:46:51.158617 8536 net.cpp:157] Top shape: (1)
I1022 10:46:51.158618 8536 net.cpp:160] with loss weight 1
I1022 10:46:51.158638 8536 net.cpp:165] Memory required for data: 31953204
I1022 10:46:51.158639 8536 net.cpp:226] loss needs backward computation.
I1022 10:46:51.158642 8536 net.cpp:226] ip1 needs backward computation.
I1022 10:46:51.158643 8536 net.cpp:226] pool3 needs backward computation.
I1022 10:46:51.158645 8536 net.cpp:226] Sigmoid3 needs backward computation.
I1022 10:46:51.158646 8536 net.cpp:226] conv3 needs backward computation.
I1022 10:46:51.158648 8536 net.cpp:226] pool2 needs backward computation.
I1022 10:46:51.158650 8536 net.cpp:226] Sigmoid2 needs backward computation.
I1022 10:46:51.158653 8536 net.cpp:226] conv2 needs backward computation.
I1022 10:46:51.158654 8536 net.cpp:226] Sigmoid1 needs backward computation.
I1022 10:46:51.158656 8536 net.cpp:226] pool1 needs backward computation.
I1022 10:46:51.158658 8536 net.cpp:226] conv1 needs backward computation.
I1022 10:46:51.158660 8536 net.cpp:228] cifar does not need backward computation.
I1022 10:46:51.158661 8536 net.cpp:270] This network produces output loss
I1022 10:46:51.158669 8536 net.cpp:283] Network initialization done.
I1022 10:46:51.158933 8536 solver.cpp:180] Creating test net (#0) specified by net file: examples/cifar10/cifar10_full_sigmoid_train_test.prototxt
I1022 10:46:51.158953 8536 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer cifar
I1022 10:46:51.159047 8536 net.cpp:49] Initializing net from parameters:
name: "CIFAR10_full"
state {
phase: TEST
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "pool1"
top: "pool1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
I1022 10:46:51.159111 8536 layer_factory.hpp:76] Creating layer cifar
I1022 10:46:51.159279 8536 net.cpp:106] Creating Layer cifar
I1022 10:46:51.159299 8536 net.cpp:411] cifar -> data
I1022 10:46:51.159314 8536 net.cpp:411] cifar -> label
I1022 10:46:51.159328 8536 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto
I1022 10:46:51.159919 8540 db_lmdb.cpp:38] Opened lmdb examples/cifar10/cifar10_test_lmdb
I1022 10:46:51.159982 8536 data_layer.cpp:45] output data size: 100,3,32,32
I1022 10:46:51.162274 8536 net.cpp:150] Setting up cifar
I1022 10:46:51.162292 8536 net.cpp:157] Top shape: 100 3 32 32 (307200)
I1022 10:46:51.162297 8536 net.cpp:157] Top shape: 100 (100)
I1022 10:46:51.162298 8536 net.cpp:165] Memory required for data: 1229200
I1022 10:46:51.162303 8536 layer_factory.hpp:76] Creating layer label_cifar_1_split
I1022 10:46:51.162314 8536 net.cpp:106] Creating Layer label_cifar_1_split
I1022 10:46:51.162317 8536 net.cpp:454] label_cifar_1_split <- label
I1022 10:46:51.162323 8536 net.cpp:411] label_cifar_1_split -> label_cifar_1_split_0
I1022 10:46:51.162330 8536 net.cpp:411] label_cifar_1_split -> label_cifar_1_split_1
I1022 10:46:51.162369 8536 net.cpp:150] Setting up label_cifar_1_split
I1022 10:46:51.162374 8536 net.cpp:157] Top shape: 100 (100)
I1022 10:46:51.162376 8536 net.cpp:157] Top shape: 100 (100)
I1022 10:46:51.162377 8536 net.cpp:165] Memory required for data: 1230000
I1022 10:46:51.162380 8536 layer_factory.hpp:76] Creating layer conv1
I1022 10:46:51.162389 8536 net.cpp:106] Creating Layer conv1
I1022 10:46:51.162391 8536 net.cpp:454] conv1 <- data
I1022 10:46:51.162396 8536 net.cpp:411] conv1 -> conv1
I1022 10:46:51.162612 8536 net.cpp:150] Setting up conv1
I1022 10:46:51.162618 8536 net.cpp:157] Top shape: 100 32 32 32 (3276800)
I1022 10:46:51.162621 8536 net.cpp:165] Memory required for data: 14337200
I1022 10:46:51.162628 8536 layer_factory.hpp:76] Creating layer pool1
I1022 10:46:51.162634 8536 net.cpp:106] Creating Layer pool1
I1022 10:46:51.162636 8536 net.cpp:454] pool1 <- conv1
I1022 10:46:51.162641 8536 net.cpp:411] pool1 -> pool1
I1022 10:46:51.162667 8536 net.cpp:150] Setting up pool1
I1022 10:46:51.162672 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.162673 8536 net.cpp:165] Memory required for data: 17614000
I1022 10:46:51.162675 8536 layer_factory.hpp:76] Creating layer Sigmoid1
I1022 10:46:51.162679 8536 net.cpp:106] Creating Layer Sigmoid1
I1022 10:46:51.162681 8536 net.cpp:454] Sigmoid1 <- pool1
I1022 10:46:51.162684 8536 net.cpp:397] Sigmoid1 -> pool1 (in-place)
I1022 10:46:51.162688 8536 net.cpp:150] Setting up Sigmoid1
I1022 10:46:51.162691 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.162693 8536 net.cpp:165] Memory required for data: 20890800
I1022 10:46:51.162710 8536 layer_factory.hpp:76] Creating layer conv2
I1022 10:46:51.162716 8536 net.cpp:106] Creating Layer conv2
I1022 10:46:51.162719 8536 net.cpp:454] conv2 <- pool1
I1022 10:46:51.162724 8536 net.cpp:411] conv2 -> conv2
I1022 10:46:51.163525 8536 net.cpp:150] Setting up conv2
I1022 10:46:51.163532 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.163533 8536 net.cpp:165] Memory required for data: 24167600
I1022 10:46:51.163539 8536 layer_factory.hpp:76] Creating layer Sigmoid2
I1022 10:46:51.163543 8536 net.cpp:106] Creating Layer Sigmoid2
I1022 10:46:51.163545 8536 net.cpp:454] Sigmoid2 <- conv2
I1022 10:46:51.163548 8536 net.cpp:397] Sigmoid2 -> conv2 (in-place)
I1022 10:46:51.163552 8536 net.cpp:150] Setting up Sigmoid2
I1022 10:46:51.163555 8536 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:46:51.163558 8536 net.cpp:165] Memory required for data: 27444400
I1022 10:46:51.163558 8536 layer_factory.hpp:76] Creating layer pool2
I1022 10:46:51.163563 8536 net.cpp:106] Creating Layer pool2
I1022 10:46:51.163564 8536 net.cpp:454] pool2 <- conv2
I1022 10:46:51.163568 8536 net.cpp:411] pool2 -> pool2
I1022 10:46:51.163580 8536 net.cpp:150] Setting up pool2
I1022 10:46:51.163586 8536 net.cpp:157] Top shape: 100 32 8 8 (204800)
I1022 10:46:51.163588 8536 net.cpp:165] Memory required for data: 28263600
I1022 10:46:51.163589 8536 layer_factory.hpp:76] Creating layer conv3
I1022 10:46:51.163596 8536 net.cpp:106] Creating Layer conv3
I1022 10:46:51.163599 8536 net.cpp:454] conv3 <- pool2
I1022 10:46:51.163602 8536 net.cpp:411] conv3 -> conv3
I1022 10:46:51.165074 8536 net.cpp:150] Setting up conv3
I1022 10:46:51.165084 8536 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:46:51.165086 8536 net.cpp:165] Memory required for data: 29902000
I1022 10:46:51.165092 8536 layer_factory.hpp:76] Creating layer Sigmoid3
I1022 10:46:51.165096 8536 net.cpp:106] Creating Layer Sigmoid3
I1022 10:46:51.165098 8536 net.cpp:454] Sigmoid3 <- conv3
I1022 10:46:51.165102 8536 net.cpp:397] Sigmoid3 -> conv3 (in-place)
I1022 10:46:51.165105 8536 net.cpp:150] Setting up Sigmoid3
I1022 10:46:51.165108 8536 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:46:51.165110 8536 net.cpp:165] Memory required for data: 31540400
I1022 10:46:51.165112 8536 layer_factory.hpp:76] Creating layer pool3
I1022 10:46:51.165117 8536 net.cpp:106] Creating Layer pool3
I1022 10:46:51.165117 8536 net.cpp:454] pool3 <- conv3
I1022 10:46:51.165122 8536 net.cpp:411] pool3 -> pool3
I1022 10:46:51.165137 8536 net.cpp:150] Setting up pool3
I1022 10:46:51.165158 8536 net.cpp:157] Top shape: 100 64 4 4 (102400)
I1022 10:46:51.165169 8536 net.cpp:165] Memory required for data: 31950000
I1022 10:46:51.165179 8536 layer_factory.hpp:76] Creating layer ip1
I1022 10:46:51.165191 8536 net.cpp:106] Creating Layer ip1
I1022 10:46:51.165202 8536 net.cpp:454] ip1 <- pool3
I1022 10:46:51.165215 8536 net.cpp:411] ip1 -> ip1
I1022 10:46:51.165566 8536 net.cpp:150] Setting up ip1
I1022 10:46:51.165573 8536 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:46:51.165575 8536 net.cpp:165] Memory required for data: 31954000
I1022 10:46:51.165580 8536 layer_factory.hpp:76] Creating layer ip1_ip1_0_split
I1022 10:46:51.165585 8536 net.cpp:106] Creating Layer ip1_ip1_0_split
I1022 10:46:51.165587 8536 net.cpp:454] ip1_ip1_0_split <- ip1
I1022 10:46:51.165591 8536 net.cpp:411] ip1_ip1_0_split -> ip1_ip1_0_split_0
I1022 10:46:51.165594 8536 net.cpp:411] ip1_ip1_0_split -> ip1_ip1_0_split_1
I1022 10:46:51.165618 8536 net.cpp:150] Setting up ip1_ip1_0_split
I1022 10:46:51.165623 8536 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:46:51.165627 8536 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:46:51.165628 8536 net.cpp:165] Memory required for data: 31962000
I1022 10:46:51.165630 8536 layer_factory.hpp:76] Creating layer accuracy
I1022 10:46:51.165637 8536 net.cpp:106] Creating Layer accuracy
I1022 10:46:51.165640 8536 net.cpp:454] accuracy <- ip1_ip1_0_split_0
I1022 10:46:51.165643 8536 net.cpp:454] accuracy <- label_cifar_1_split_0
I1022 10:46:51.165668 8536 net.cpp:411] accuracy -> accuracy
I1022 10:46:51.165678 8536 net.cpp:150] Setting up accuracy
I1022 10:46:51.165690 8536 net.cpp:157] Top shape: (1)
I1022 10:46:51.165701 8536 net.cpp:165] Memory required for data: 31962004
I1022 10:46:51.165710 8536 layer_factory.hpp:76] Creating layer loss
I1022 10:46:51.165722 8536 net.cpp:106] Creating Layer loss
I1022 10:46:51.165732 8536 net.cpp:454] loss <- ip1_ip1_0_split_1
I1022 10:46:51.165743 8536 net.cpp:454] loss <- label_cifar_1_split_1
I1022 10:46:51.165756 8536 net.cpp:411] loss -> loss
I1022 10:46:51.165771 8536 layer_factory.hpp:76] Creating layer loss
I1022 10:46:51.165832 8536 net.cpp:150] Setting up loss
I1022 10:46:51.165848 8536 net.cpp:157] Top shape: (1)
I1022 10:46:51.165856 8536 net.cpp:160] with loss weight 1
I1022 10:46:51.165873 8536 net.cpp:165] Memory required for data: 31962008
I1022 10:46:51.165881 8536 net.cpp:226] loss needs backward computation.
I1022 10:46:51.165890 8536 net.cpp:228] accuracy does not need backward computation.
I1022 10:46:51.165900 8536 net.cpp:226] ip1_ip1_0_split needs backward computation.
I1022 10:46:51.165910 8536 net.cpp:226] ip1 needs backward computation.
I1022 10:46:51.165918 8536 net.cpp:226] pool3 needs backward computation.
I1022 10:46:51.165927 8536 net.cpp:226] Sigmoid3 needs backward computation.
I1022 10:46:51.165935 8536 net.cpp:226] conv3 needs backward computation.
I1022 10:46:51.165945 8536 net.cpp:226] pool2 needs backward computation.
I1022 10:46:51.165953 8536 net.cpp:226] Sigmoid2 needs backward computation.
I1022 10:46:51.165962 8536 net.cpp:226] conv2 needs backward computation.
I1022 10:46:51.165971 8536 net.cpp:226] Sigmoid1 needs backward computation.
I1022 10:46:51.165979 8536 net.cpp:226] pool1 needs backward computation.
I1022 10:46:51.165988 8536 net.cpp:226] conv1 needs backward computation.
I1022 10:46:51.165998 8536 net.cpp:228] label_cifar_1_split does not need backward computation.
I1022 10:46:51.166007 8536 net.cpp:228] cifar does not need backward computation.
I1022 10:46:51.166016 8536 net.cpp:270] This network produces output accuracy
I1022 10:46:51.166025 8536 net.cpp:270] This network produces output loss
I1022 10:46:51.166043 8536 net.cpp:283] Network initialization done.
I1022 10:46:51.166105 8536 solver.cpp:59] Solver scaffolding done.
I1022 10:46:51.166326 8536 caffe.cpp:212] Starting Optimization
I1022 10:46:51.166334 8536 solver.cpp:287] Solving CIFAR10_full
I1022 10:46:51.166337 8536 solver.cpp:288] Learning Rate Policy: multistep
I1022 10:46:51.166899 8536 solver.cpp:340] Iteration 0, Testing net (#0)
I1022 10:46:53.488729 8536 solver.cpp:408] Test net output #0: accuracy = 0.1
I1022 10:46:53.488755 8536 solver.cpp:408] Test net output #1: loss = 2.31366 (* 1 = 2.31366 loss)
I1022 10:46:53.521021 8536 solver.cpp:236] Iteration 0, loss = 2.294
I1022 10:46:53.521047 8536 solver.cpp:252] Train net output #0: loss = 2.294 (* 1 = 2.294 loss)
I1022 10:46:53.521064 8536 sgd_solver.cpp:106] Iteration 0, lr = 0.001
I1022 10:46:58.813424 8536 solver.cpp:236] Iteration 100, loss = 2.30219
I1022 10:46:58.813460 8536 solver.cpp:252] Train net output #0: loss = 2.30219 (* 1 = 2.30219 loss)
I1022 10:46:58.813465 8536 sgd_solver.cpp:106] Iteration 100, lr = 0.001
I1022 10:47:04.035897 8536 solver.cpp:236] Iteration 200, loss = 2.29944
I1022 10:47:04.035923 8536 solver.cpp:252] Train net output #0: loss = 2.29944 (* 1 = 2.29944 loss)
I1022 10:47:04.035928 8536 sgd_solver.cpp:106] Iteration 200, lr = 0.001
I1022 10:47:09.264737 8536 solver.cpp:236] Iteration 300, loss = 2.29272
I1022 10:47:09.264763 8536 solver.cpp:252] Train net output #0: loss = 2.29272 (* 1 = 2.29272 loss)
I1022 10:47:09.264770 8536 sgd_solver.cpp:106] Iteration 300, lr = 0.001
I1022 10:47:16.553303 8536 solver.cpp:236] Iteration 400, loss = 2.31916
I1022 10:47:16.553328 8536 solver.cpp:252] Train net output #0: loss = 2.31916 (* 1 = 2.31916 loss)
I1022 10:47:16.553334 8536 sgd_solver.cpp:106] Iteration 400, lr = 0.001
I1022 10:47:27.018857 8536 solver.cpp:236] Iteration 500, loss = 2.30496
I1022 10:47:27.018935 8536 solver.cpp:252] Train net output #0: loss = 2.30496 (* 1 = 2.30496 loss)
I1022 10:47:27.018942 8536 sgd_solver.cpp:106] Iteration 500, lr = 0.001
I1022 10:47:37.502616 8536 solver.cpp:236] Iteration 600, loss = 2.30169
I1022 10:47:37.502653 8536 solver.cpp:252] Train net output #0: loss = 2.30169 (* 1 = 2.30169 loss)
I1022 10:47:37.502658 8536 sgd_solver.cpp:106] Iteration 600, lr = 0.001
I1022 10:47:48.146152 8536 solver.cpp:236] Iteration 700, loss = 2.29867
I1022 10:47:48.146188 8536 solver.cpp:252] Train net output #0: loss = 2.29867 (* 1 = 2.29867 loss)
I1022 10:47:48.146194 8536 sgd_solver.cpp:106] Iteration 700, lr = 0.001
I1022 10:47:58.747323 8536 solver.cpp:236] Iteration 800, loss = 2.29204
I1022 10:47:58.747630 8536 solver.cpp:252] Train net output #0: loss = 2.29204 (* 1 = 2.29204 loss)
I1022 10:47:58.747637 8536 sgd_solver.cpp:106] Iteration 800, lr = 0.001
I1022 10:48:09.425886 8536 solver.cpp:236] Iteration 900, loss = 2.31886
I1022 10:48:09.425925 8536 solver.cpp:252] Train net output #0: loss = 2.31886 (* 1 = 2.31886 loss)
I1022 10:48:09.425930 8536 sgd_solver.cpp:106] Iteration 900, lr = 0.001
I1022 10:48:19.959596 8536 solver.cpp:340] Iteration 1000, Testing net (#0)
I1022 10:48:25.035497 8536 solver.cpp:408] Test net output #0: accuracy = 0.1
I1022 10:48:25.035532 8536 solver.cpp:408] Test net output #1: loss = 2.30539 (* 1 = 2.30539 loss)
I1022 10:48:25.095721 8536 solver.cpp:236] Iteration 1000, loss = 2.30494
I1022 10:48:25.095741 8536 solver.cpp:252] Train net output #0: loss = 2.30494 (* 1 = 2.30494 loss)
I1022 10:48:25.095747 8536 sgd_solver.cpp:106] Iteration 1000, lr = 0.001
I1022 10:48:35.566923 8536 solver.cpp:236] Iteration 1100, loss = 2.29925
I1022 10:48:35.567010 8536 solver.cpp:252] Train net output #0: loss = 2.29925 (* 1 = 2.29925 loss)
I1022 10:48:35.567015 8536 sgd_solver.cpp:106] Iteration 1100, lr = 0.001
I1022 10:48:46.141048 8536 solver.cpp:236] Iteration 1200, loss = 2.29594
I1022 10:48:46.141077 8536 solver.cpp:252] Train net output #0: loss = 2.29594 (* 1 = 2.29594 loss)
I1022 10:48:46.141083 8536 sgd_solver.cpp:106] Iteration 1200, lr = 0.001
I1022 10:48:56.712587 8536 solver.cpp:236] Iteration 1300, loss = 2.28786
I1022 10:48:56.712626 8536 solver.cpp:252] Train net output #0: loss = 2.28786 (* 1 = 2.28786 loss)
I1022 10:48:56.712631 8536 sgd_solver.cpp:106] Iteration 1300, lr = 0.001
I1022 10:49:07.135187 8536 solver.cpp:236] Iteration 1400, loss = 2.31282
I1022 10:49:07.135274 8536 solver.cpp:252] Train net output #0: loss = 2.31282 (* 1 = 2.31282 loss)
I1022 10:49:07.135280 8536 sgd_solver.cpp:106] Iteration 1400, lr = 0.001
I1022 10:49:17.569702 8536 solver.cpp:236] Iteration 1500, loss = 2.29396
I1022 10:49:17.569737 8536 solver.cpp:252] Train net output #0: loss = 2.29396 (* 1 = 2.29396 loss)
I1022 10:49:17.569743 8536 sgd_solver.cpp:106] Iteration 1500, lr = 0.001
I1022 10:49:28.073379 8536 solver.cpp:236] Iteration 1600, loss = 2.24384
I1022 10:49:28.073416 8536 solver.cpp:252] Train net output #0: loss = 2.24384 (* 1 = 2.24384 loss)
I1022 10:49:28.073421 8536 sgd_solver.cpp:106] Iteration 1600, lr = 0.001
I1022 10:49:38.646904 8536 solver.cpp:236] Iteration 1700, loss = 2.20429
I1022 10:49:38.646970 8536 solver.cpp:252] Train net output #0: loss = 2.20429 (* 1 = 2.20429 loss)
I1022 10:49:38.646976 8536 sgd_solver.cpp:106] Iteration 1700, lr = 0.001
I1022 10:49:49.163851 8536 solver.cpp:236] Iteration 1800, loss = 2.08868
I1022 10:49:49.163887 8536 solver.cpp:252] Train net output #0: loss = 2.08868 (* 1 = 2.08868 loss)
I1022 10:49:49.163892 8536 sgd_solver.cpp:106] Iteration 1800, lr = 0.001
I1022 10:49:59.671581 8536 solver.cpp:236] Iteration 1900, loss = 2.08125
I1022 10:49:59.671609 8536 solver.cpp:252] Train net output #0: loss = 2.08125 (* 1 = 2.08125 loss)
I1022 10:49:59.671617 8536 sgd_solver.cpp:106] Iteration 1900, lr = 0.001
I1022 10:50:10.055730 8536 solver.cpp:340] Iteration 2000, Testing net (#0)
I1022 10:50:15.141084 8536 solver.cpp:408] Test net output #0: accuracy = 0.2355
I1022 10:50:15.141109 8536 solver.cpp:408] Test net output #1: loss = 2.05127 (* 1 = 2.05127 loss)
I1022 10:50:15.208761 8536 solver.cpp:236] Iteration 2000, loss = 2.11215
I1022 10:50:15.208791 8536 solver.cpp:252] Train net output #0: loss = 2.11215 (* 1 = 2.11215 loss)
I1022 10:50:15.208796 8536 sgd_solver.cpp:106] Iteration 2000, lr = 0.001
I1022 10:50:25.757208 8536 solver.cpp:236] Iteration 2100, loss = 2.0152
I1022 10:50:25.757263 8536 solver.cpp:252] Train net output #0: loss = 2.0152 (* 1 = 2.0152 loss)
I1022 10:50:25.757274 8536 sgd_solver.cpp:106] Iteration 2100, lr = 0.001
I1022 10:50:36.272904 8536 solver.cpp:236] Iteration 2200, loss = 2.06809
I1022 10:50:36.272940 8536 solver.cpp:252] Train net output #0: loss = 2.06809 (* 1 = 2.06809 loss)
I1022 10:50:36.272946 8536 sgd_solver.cpp:106] Iteration 2200, lr = 0.001
I1022 10:50:46.837977 8536 solver.cpp:236] Iteration 2300, loss = 1.93628
I1022 10:50:46.838059 8536 solver.cpp:252] Train net output #0: loss = 1.93628 (* 1 = 1.93628 loss)
I1022 10:50:46.838064 8536 sgd_solver.cpp:106] Iteration 2300, lr = 0.001
I1022 10:50:57.433101 8536 solver.cpp:236] Iteration 2400, loss = 1.99968
I1022 10:50:57.433138 8536 solver.cpp:252] Train net output #0: loss = 1.99968 (* 1 = 1.99968 loss)
I1022 10:50:57.433145 8536 sgd_solver.cpp:106] Iteration 2400, lr = 0.001
I1022 10:51:07.859019 8536 solver.cpp:236] Iteration 2500, loss = 2.02649
I1022 10:51:07.859055 8536 solver.cpp:252] Train net output #0: loss = 2.02649 (* 1 = 2.02649 loss)
I1022 10:51:07.859061 8536 sgd_solver.cpp:106] Iteration 2500, lr = 0.001
I1022 10:51:18.373309 8536 solver.cpp:236] Iteration 2600, loss = 1.94685
I1022 10:51:18.373383 8536 solver.cpp:252] Train net output #0: loss = 1.94685 (* 1 = 1.94685 loss)
I1022 10:51:18.373389 8536 sgd_solver.cpp:106] Iteration 2600, lr = 0.001
I1022 10:51:28.929222 8536 solver.cpp:236] Iteration 2700, loss = 2.01653
I1022 10:51:28.929262 8536 solver.cpp:252] Train net output #0: loss = 2.01653 (* 1 = 2.01653 loss)
I1022 10:51:28.929267 8536 sgd_solver.cpp:106] Iteration 2700, lr = 0.001
I1022 10:51:39.529273 8536 solver.cpp:236] Iteration 2800, loss = 1.87566
I1022 10:51:39.529299 8536 solver.cpp:252] Train net output #0: loss = 1.87566 (* 1 = 1.87566 loss)
I1022 10:51:39.529304 8536 sgd_solver.cpp:106] Iteration 2800, lr = 0.001
I1022 10:51:50.083822 8536 solver.cpp:236] Iteration 2900, loss = 1.92571
I1022 10:51:50.083925 8536 solver.cpp:252] Train net output #0: loss = 1.92571 (* 1 = 1.92571 loss)
I1022 10:51:50.083938 8536 sgd_solver.cpp:106] Iteration 2900, lr = 0.001
I1022 10:52:00.507814 8536 solver.cpp:340] Iteration 3000, Testing net (#0)
I1022 10:52:05.738812 8536 solver.cpp:408] Test net output #0: accuracy = 0.2823
I1022 10:52:05.738847 8536 solver.cpp:408] Test net output #1: loss = 1.94049 (* 1 = 1.94049 loss)
I1022 10:52:05.809937 8536 solver.cpp:236] Iteration 3000, loss = 2.00196
I1022 10:52:05.809964 8536 solver.cpp:252] Train net output #0: loss = 2.00196 (* 1 = 2.00196 loss)
I1022 10:52:05.809972 8536 sgd_solver.cpp:106] Iteration 3000, lr = 0.001
I1022 10:52:16.504825 8536 solver.cpp:236] Iteration 3100, loss = 1.88657
I1022 10:52:16.504851 8536 solver.cpp:252] Train net output #0: loss = 1.88657 (* 1 = 1.88657 loss)
I1022 10:52:16.504856 8536 sgd_solver.cpp:106] Iteration 3100, lr = 0.001
I1022 10:52:27.067658 8536 solver.cpp:236] Iteration 3200, loss = 1.96299
I1022 10:52:27.067760 8536 solver.cpp:252] Train net output #0: loss = 1.96299 (* 1 = 1.96299 loss)
I1022 10:52:27.067766 8536 sgd_solver.cpp:106] Iteration 3200, lr = 0.001
I1022 10:52:37.625941 8536 solver.cpp:236] Iteration 3300, loss = 1.8326
I1022 10:52:37.625978 8536 solver.cpp:252] Train net output #0: loss = 1.8326 (* 1 = 1.8326 loss)
I1022 10:52:37.625984 8536 sgd_solver.cpp:106] Iteration 3300, lr = 0.001
I1022 10:52:48.165839 8536 solver.cpp:236] Iteration 3400, loss = 1.8503
I1022 10:52:48.165876 8536 solver.cpp:252] Train net output #0: loss = 1.8503 (* 1 = 1.8503 loss)
I1022 10:52:48.165881 8536 sgd_solver.cpp:106] Iteration 3400, lr = 0.001
I1022 10:52:58.601848 8536 solver.cpp:236] Iteration 3500, loss = 1.97493
I1022 10:52:58.601970 8536 solver.cpp:252] Train net output #0: loss = 1.97493 (* 1 = 1.97493 loss)
I1022 10:52:58.601986 8536 sgd_solver.cpp:106] Iteration 3500, lr = 0.001
I1022 10:53:08.896368 8536 solver.cpp:236] Iteration 3600, loss = 1.85193
I1022 10:53:08.896407 8536 solver.cpp:252] Train net output #0: loss = 1.85193 (* 1 = 1.85193 loss)
I1022 10:53:08.896414 8536 sgd_solver.cpp:106] Iteration 3600, lr = 0.001
I1022 10:53:19.378051 8536 solver.cpp:236] Iteration 3700, loss = 1.94166
I1022 10:53:19.378079 8536 solver.cpp:252] Train net output #0: loss = 1.94166 (* 1 = 1.94166 loss)
I1022 10:53:19.378085 8536 sgd_solver.cpp:106] Iteration 3700, lr = 0.001
I1022 10:53:29.866140 8536 solver.cpp:236] Iteration 3800, loss = 1.76446
I1022 10:53:29.866227 8536 solver.cpp:252] Train net output #0: loss = 1.76446 (* 1 = 1.76446 loss)
I1022 10:53:29.866233 8536 sgd_solver.cpp:106] Iteration 3800, lr = 0.001
I1022 10:53:40.353912 8536 solver.cpp:236] Iteration 3900, loss = 1.81656
I1022 10:53:40.353950 8536 solver.cpp:252] Train net output #0: loss = 1.81656 (* 1 = 1.81656 loss)
I1022 10:53:40.353956 8536 sgd_solver.cpp:106] Iteration 3900, lr = 0.001
I1022 10:53:50.744300 8536 solver.cpp:340] Iteration 4000, Testing net (#0)
I1022 10:53:55.826562 8536 solver.cpp:408] Test net output #0: accuracy = 0.3026
I1022 10:53:55.826597 8536 solver.cpp:408] Test net output #1: loss = 1.8486 (* 1 = 1.8486 loss)
I1022 10:53:55.895066 8536 solver.cpp:236] Iteration 4000, loss = 1.93899
I1022 10:53:55.895099 8536 solver.cpp:252] Train net output #0: loss = 1.93899 (* 1 = 1.93899 loss)
I1022 10:53:55.895105 8536 sgd_solver.cpp:106] Iteration 4000, lr = 0.001
I1022 10:54:06.391891 8536 solver.cpp:236] Iteration 4100, loss = 1.77812
I1022 10:54:06.391999 8536 solver.cpp:252] Train net output #0: loss = 1.77812 (* 1 = 1.77812 loss)
I1022 10:54:06.392014 8536 sgd_solver.cpp:106] Iteration 4100, lr = 0.001
I1022 10:54:16.879597 8536 solver.cpp:236] Iteration 4200, loss = 1.90906
I1022 10:54:16.879633 8536 solver.cpp:252] Train net output #0: loss = 1.90906 (* 1 = 1.90906 loss)
I1022 10:54:16.879639 8536 sgd_solver.cpp:106] Iteration 4200, lr = 0.001
I1022 10:54:27.365455 8536 solver.cpp:236] Iteration 4300, loss = 1.70922
I1022 10:54:27.365483 8536 solver.cpp:252] Train net output #0: loss = 1.70922 (* 1 = 1.70922 loss)
I1022 10:54:27.365488 8536 sgd_solver.cpp:106] Iteration 4300, lr = 0.001
I1022 10:54:37.859992 8536 solver.cpp:236] Iteration 4400, loss = 1.79718
I1022 10:54:37.860100 8536 solver.cpp:252] Train net output #0: loss = 1.79718 (* 1 = 1.79718 loss)
I1022 10:54:37.860105 8536 sgd_solver.cpp:106] Iteration 4400, lr = 0.001
I1022 10:54:48.347522 8536 solver.cpp:236] Iteration 4500, loss = 1.95083
I1022 10:54:48.347558 8536 solver.cpp:252] Train net output #0: loss = 1.95083 (* 1 = 1.95083 loss)
I1022 10:54:48.347563 8536 sgd_solver.cpp:106] Iteration 4500, lr = 0.001
I1022 10:54:58.794389 8536 solver.cpp:236] Iteration 4600, loss = 1.76111
I1022 10:54:58.794441 8536 solver.cpp:252] Train net output #0: loss = 1.76111 (* 1 = 1.76111 loss)
I1022 10:54:58.794453 8536 sgd_solver.cpp:106] Iteration 4600, lr = 0.001
I1022 10:55:09.395520 8536 solver.cpp:236] Iteration 4700, loss = 1.85389
I1022 10:55:09.395634 8536 solver.cpp:252] Train net output #0: loss = 1.85389 (* 1 = 1.85389 loss)
I1022 10:55:09.395640 8536 sgd_solver.cpp:106] Iteration 4700, lr = 0.001
I1022 10:55:19.962901 8536 solver.cpp:236] Iteration 4800, loss = 1.69763
I1022 10:55:19.962941 8536 solver.cpp:252] Train net output #0: loss = 1.69763 (* 1 = 1.69763 loss)
I1022 10:55:19.962945 8536 sgd_solver.cpp:106] Iteration 4800, lr = 0.001
I1022 10:55:30.501989 8536 solver.cpp:236] Iteration 4900, loss = 1.7594
I1022 10:55:30.502025 8536 solver.cpp:252] Train net output #0: loss = 1.7594 (* 1 = 1.7594 loss)
I1022 10:55:30.502030 8536 sgd_solver.cpp:106] Iteration 4900, lr = 0.001
I1022 10:55:40.928983 8536 solver.cpp:340] Iteration 5000, Testing net (#0)
I1022 10:55:45.998831 8536 solver.cpp:408] Test net output #0: accuracy = 0.308
I1022 10:55:45.998867 8536 solver.cpp:408] Test net output #1: loss = 1.80463 (* 1 = 1.80463 loss)
I1022 10:55:46.066536 8536 solver.cpp:236] Iteration 5000, loss = 1.93135
I1022 10:55:46.066562 8536 solver.cpp:252] Train net output #0: loss = 1.93135 (* 1 = 1.93135 loss)
I1022 10:55:46.066568 8536 sgd_solver.cpp:106] Iteration 5000, lr = 0.001
I1022 10:55:56.585947 8536 solver.cpp:236] Iteration 5100, loss = 1.69818
I1022 10:55:56.585983 8536 solver.cpp:252] Train net output #0: loss = 1.69818 (* 1 = 1.69818 loss)
I1022 10:55:56.585988 8536 sgd_solver.cpp:106] Iteration 5100, lr = 0.001
I1022 10:56:07.120666 8536 solver.cpp:236] Iteration 5200, loss = 1.86811
I1022 10:56:07.120702 8536 solver.cpp:252] Train net output #0: loss = 1.86811 (* 1 = 1.86811 loss)
I1022 10:56:07.120707 8536 sgd_solver.cpp:106] Iteration 5200, lr = 0.001
I1022 10:56:17.630897 8536 solver.cpp:236] Iteration 5300, loss = 1.6767
I1022 10:56:17.630985 8536 solver.cpp:252] Train net output #0: loss = 1.6767 (* 1 = 1.6767 loss)
I1022 10:56:17.630997 8536 sgd_solver.cpp:106] Iteration 5300, lr = 0.001
I1022 10:56:28.141518 8536 solver.cpp:236] Iteration 5400, loss = 1.74997
I1022 10:56:28.141554 8536 solver.cpp:252] Train net output #0: loss = 1.74997 (* 1 = 1.74997 loss)
I1022 10:56:28.141561 8536 sgd_solver.cpp:106] Iteration 5400, lr = 0.001
I1022 10:56:38.661532 8536 solver.cpp:236] Iteration 5500, loss = 1.92415
I1022 10:56:38.661571 8536 solver.cpp:252] Train net output #0: loss = 1.92415 (* 1 = 1.92415 loss)
I1022 10:56:38.661576 8536 sgd_solver.cpp:106] Iteration 5500, lr = 0.001
I1022 10:56:49.181596 8536 solver.cpp:236] Iteration 5600, loss = 1.69164
I1022 10:56:49.181702 8536 solver.cpp:252] Train net output #0: loss = 1.69164 (* 1 = 1.69164 loss)
I1022 10:56:49.181707 8536 sgd_solver.cpp:106] Iteration 5600, lr = 0.001
I1022 10:56:59.446550 8536 solver.cpp:236] Iteration 5700, loss = 1.87641
I1022 10:56:59.446586 8536 solver.cpp:252] Train net output #0: loss = 1.87641 (* 1 = 1.87641 loss)
I1022 10:56:59.446593 8536 sgd_solver.cpp:106] Iteration 5700, lr = 0.001
I1022 10:57:10.012836 8536 solver.cpp:236] Iteration 5800, loss = 1.66923
I1022 10:57:10.012872 8536 solver.cpp:252] Train net output #0: loss = 1.66923 (* 1 = 1.66923 loss)
I1022 10:57:10.012878 8536 sgd_solver.cpp:106] Iteration 5800, lr = 0.001
I1022 10:57:20.575670 8536 solver.cpp:236] Iteration 5900, loss = 1.72808
I1022 10:57:20.575757 8536 solver.cpp:252] Train net output #0: loss = 1.72808 (* 1 = 1.72808 loss)
I1022 10:57:20.575763 8536 sgd_solver.cpp:106] Iteration 5900, lr = 0.001
I1022 10:57:31.017033 8536 solver.cpp:340] Iteration 6000, Testing net (#0)
I1022 10:57:36.112232 8536 solver.cpp:408] Test net output #0: accuracy = 0.3242
I1022 10:57:36.112277 8536 solver.cpp:408] Test net output #1: loss = 1.77918 (* 1 = 1.77918 loss)
I1022 10:57:36.181268 8536 solver.cpp:236] Iteration 6000, loss = 1.90746
I1022 10:57:36.181306 8536 solver.cpp:252] Train net output #0: loss = 1.90746 (* 1 = 1.90746 loss)
I1022 10:57:36.181313 8536 sgd_solver.cpp:106] Iteration 6000, lr = 0.001
I1022 10:57:46.716547 8536 solver.cpp:236] Iteration 6100, loss = 1.68754
I1022 10:57:46.716584 8536 solver.cpp:252] Train net output #0: loss = 1.68754 (* 1 = 1.68754 loss)
I1022 10:57:46.716590 8536 sgd_solver.cpp:106] Iteration 6100, lr = 0.001
I1022 10:57:57.256057 8536 solver.cpp:236] Iteration 6200, loss = 1.8501
I1022 10:57:57.256147 8536 solver.cpp:252] Train net output #0: loss = 1.8501 (* 1 = 1.8501 loss)
I1022 10:57:57.256153 8536 sgd_solver.cpp:106] Iteration 6200, lr = 0.001
I1022 10:58:07.789499 8536 solver.cpp:236] Iteration 6300, loss = 1.66586
I1022 10:58:07.789525 8536 solver.cpp:252] Train net output #0: loss = 1.66586 (* 1 = 1.66586 loss)
I1022 10:58:07.789531 8536 sgd_solver.cpp:106] Iteration 6300, lr = 0.001
I1022 10:58:18.372783 8536 solver.cpp:236] Iteration 6400, loss = 1.72565
I1022 10:58:18.372819 8536 solver.cpp:252] Train net output #0: loss = 1.72565 (* 1 = 1.72565 loss)
I1022 10:58:18.372825 8536 sgd_solver.cpp:106] Iteration 6400, lr = 0.001
I1022 10:58:28.895227 8536 solver.cpp:236] Iteration 6500, loss = 1.91026
I1022 10:58:28.895318 8536 solver.cpp:252] Train net output #0: loss = 1.91026 (* 1 = 1.91026 loss)
I1022 10:58:28.895333 8536 sgd_solver.cpp:106] Iteration 6500, lr = 0.001
I1022 10:58:39.382961 8536 solver.cpp:236] Iteration 6600, loss = 1.66892
I1022 10:58:39.382988 8536 solver.cpp:252] Train net output #0: loss = 1.66892 (* 1 = 1.66892 loss)
I1022 10:58:39.382994 8536 sgd_solver.cpp:106] Iteration 6600, lr = 0.001
I1022 10:58:49.741792 8536 solver.cpp:236] Iteration 6700, loss = 1.86964
I1022 10:58:49.741818 8536 solver.cpp:252] Train net output #0: loss = 1.86964 (* 1 = 1.86964 loss)
I1022 10:58:49.741824 8536 sgd_solver.cpp:106] Iteration 6700, lr = 0.001
I1022 10:59:00.210350 8536 solver.cpp:236] Iteration 6800, loss = 1.65025
I1022 10:59:00.210438 8536 solver.cpp:252] Train net output #0: loss = 1.65025 (* 1 = 1.65025 loss)
I1022 10:59:00.210445 8536 sgd_solver.cpp:106] Iteration 6800, lr = 0.001
I1022 10:59:10.731395 8536 solver.cpp:236] Iteration 6900, loss = 1.68919
I1022 10:59:10.731431 8536 solver.cpp:252] Train net output #0: loss = 1.68919 (* 1 = 1.68919 loss)
I1022 10:59:10.731437 8536 sgd_solver.cpp:106] Iteration 6900, lr = 0.001
I1022 10:59:21.131471 8536 solver.cpp:340] Iteration 7000, Testing net (#0)
I1022 10:59:26.214035 8536 solver.cpp:408] Test net output #0: accuracy = 0.3266
I1022 10:59:26.214061 8536 solver.cpp:408] Test net output #1: loss = 1.76517 (* 1 = 1.76517 loss)
I1022 10:59:26.280478 8536 solver.cpp:236] Iteration 7000, loss = 1.8829
I1022 10:59:26.280509 8536 solver.cpp:252] Train net output #0: loss = 1.8829 (* 1 = 1.8829 loss)
I1022 10:59:26.280515 8536 sgd_solver.cpp:106] Iteration 7000, lr = 0.001
I1022 10:59:36.885742 8536 solver.cpp:236] Iteration 7100, loss = 1.6448
I1022 10:59:36.885826 8536 solver.cpp:252] Train net output #0: loss = 1.6448 (* 1 = 1.6448 loss)
I1022 10:59:36.885838 8536 sgd_solver.cpp:106] Iteration 7100, lr = 0.001
I1022 10:59:47.461748 8536 solver.cpp:236] Iteration 7200, loss = 1.87966
I1022 10:59:47.461779 8536 solver.cpp:252] Train net output #0: loss = 1.87966 (* 1 = 1.87966 loss)
I1022 10:59:47.461784 8536 sgd_solver.cpp:106] Iteration 7200, lr = 0.001
I1022 10:59:57.976933 8536 solver.cpp:236] Iteration 7300, loss = 1.65232
I1022 10:59:57.976960 8536 solver.cpp:252] Train net output #0: loss = 1.65232 (* 1 = 1.65232 loss)
I1022 10:59:57.976965 8536 sgd_solver.cpp:106] Iteration 7300, lr = 0.001
I1022 11:00:08.474916 8536 solver.cpp:236] Iteration 7400, loss = 1.69676
I1022 11:00:08.475021 8536 solver.cpp:252] Train net output #0: loss = 1.69676 (* 1 = 1.69676 loss)
I1022 11:00:08.475033 8536 sgd_solver.cpp:106] Iteration 7400, lr = 0.001
I1022 11:00:19.075892 8536 solver.cpp:236] Iteration 7500, loss = 1.85232
I1022 11:00:19.075929 8536 solver.cpp:252] Train net output #0: loss = 1.85232 (* 1 = 1.85232 loss)
I1022 11:00:19.075934 8536 sgd_solver.cpp:106] Iteration 7500, lr = 0.001
I1022 11:00:29.613118 8536 solver.cpp:236] Iteration 7600, loss = 1.6578
I1022 11:00:29.613145 8536 solver.cpp:252] Train net output #0: loss = 1.6578 (* 1 = 1.6578 loss)
I1022 11:00:29.613150 8536 sgd_solver.cpp:106] Iteration 7600, lr = 0.001
I1022 11:00:40.122006 8536 solver.cpp:236] Iteration 7700, loss = 1.81921
I1022 11:00:40.122114 8536 solver.cpp:252] Train net output #0: loss = 1.81921 (* 1 = 1.81921 loss)
I1022 11:00:40.122129 8536 sgd_solver.cpp:106] Iteration 7700, lr = 0.001
I1022 11:00:50.393288 8536 solver.cpp:236] Iteration 7800, loss = 1.63087
I1022 11:00:50.393326 8536 solver.cpp:252] Train net output #0: loss = 1.63087 (* 1 = 1.63087 loss)
I1022 11:00:50.393331 8536 sgd_solver.cpp:106] Iteration 7800, lr = 0.001
I1022 11:01:00.896307 8536 solver.cpp:236] Iteration 7900, loss = 1.68231
I1022 11:01:00.896335 8536 solver.cpp:252] Train net output #0: loss = 1.68231 (* 1 = 1.68231 loss)
I1022 11:01:00.896340 8536 sgd_solver.cpp:106] Iteration 7900, lr = 0.001
I1022 11:01:11.294525 8536 solver.cpp:340] Iteration 8000, Testing net (#0)
I1022 11:01:16.378751 8536 solver.cpp:408] Test net output #0: accuracy = 0.3361
I1022 11:01:16.378777 8536 solver.cpp:408] Test net output #1: loss = 1.75532 (* 1 = 1.75532 loss)
I1022 11:01:16.446626 8536 solver.cpp:236] Iteration 8000, loss = 1.87086
I1022 11:01:16.446662 8536 solver.cpp:252] Train net output #0: loss = 1.87086 (* 1 = 1.87086 loss)
I1022 11:01:16.446668 8536 sgd_solver.cpp:106] Iteration 8000, lr = 0.001
I1022 11:01:26.943320 8536 solver.cpp:236] Iteration 8100, loss = 1.64373
I1022 11:01:26.943356 8536 solver.cpp:252] Train net output #0: loss = 1.64373 (* 1 = 1.64373 loss)
I1022 11:01:26.943361 8536 sgd_solver.cpp:106] Iteration 8100, lr = 0.001
I1022 11:01:37.441681 8536 solver.cpp:236] Iteration 8200, loss = 1.81089
I1022 11:01:37.441720 8536 solver.cpp:252] Train net output #0: loss = 1.81089 (* 1 = 1.81089 loss)
I1022 11:01:37.441725 8536 sgd_solver.cpp:106] Iteration 8200, lr = 0.001
I1022 11:01:47.950942 8536 solver.cpp:236] Iteration 8300, loss = 1.62866
I1022 11:01:47.951035 8536 solver.cpp:252] Train net output #0: loss = 1.62866 (* 1 = 1.62866 loss)
I1022 11:01:47.951040 8536 sgd_solver.cpp:106] Iteration 8300, lr = 0.001
I1022 11:01:58.455128 8536 solver.cpp:236] Iteration 8400, loss = 1.66818
I1022 11:01:58.455165 8536 solver.cpp:252] Train net output #0: loss = 1.66818 (* 1 = 1.66818 loss)
I1022 11:01:58.455171 8536 sgd_solver.cpp:106] Iteration 8400, lr = 0.001
I1022 11:02:08.955467 8536 solver.cpp:236] Iteration 8500, loss = 1.87937
I1022 11:02:08.955505 8536 solver.cpp:252] Train net output #0: loss = 1.87937 (* 1 = 1.87937 loss)
I1022 11:02:08.955510 8536 sgd_solver.cpp:106] Iteration 8500, lr = 0.001
I1022 11:02:19.468601 8536 solver.cpp:236] Iteration 8600, loss = 1.65065
I1022 11:02:19.468689 8536 solver.cpp:252] Train net output #0: loss = 1.65065 (* 1 = 1.65065 loss)
I1022 11:02:19.468694 8536 sgd_solver.cpp:106] Iteration 8600, lr = 0.001
I1022 11:02:29.970166 8536 solver.cpp:236] Iteration 8700, loss = 1.83875
I1022 11:02:29.970202 8536 solver.cpp:252] Train net output #0: loss = 1.83875 (* 1 = 1.83875 loss)
I1022 11:02:29.970208 8536 sgd_solver.cpp:106] Iteration 8700, lr = 0.001
I1022 11:02:40.421144 8536 solver.cpp:236] Iteration 8800, loss = 1.61695
I1022 11:02:40.421172 8536 solver.cpp:252] Train net output #0: loss = 1.61695 (* 1 = 1.61695 loss)
I1022 11:02:40.421178 8536 sgd_solver.cpp:106] Iteration 8800, lr = 0.001
I1022 11:02:50.732916 8536 solver.cpp:236] Iteration 8900, loss = 1.66214
I1022 11:02:50.733006 8536 solver.cpp:252] Train net output #0: loss = 1.66214 (* 1 = 1.66214 loss)
I1022 11:02:50.733011 8536 sgd_solver.cpp:106] Iteration 8900, lr = 0.001
I1022 11:03:01.170263 8536 solver.cpp:340] Iteration 9000, Testing net (#0)
I1022 11:03:06.284602 8536 solver.cpp:408] Test net output #0: accuracy = 0.3435
I1022 11:03:06.284641 8536 solver.cpp:408] Test net output #1: loss = 1.74518 (* 1 = 1.74518 loss)
I1022 11:03:06.351274 8536 solver.cpp:236] Iteration 9000, loss = 1.86458
I1022 11:03:06.351312 8536 solver.cpp:252] Train net output #0: loss = 1.86458 (* 1 = 1.86458 loss)
I1022 11:03:06.351318 8536 sgd_solver.cpp:106] Iteration 9000, lr = 0.001
I1022 11:03:16.893996 8536 solver.cpp:236] Iteration 9100, loss = 1.63709
I1022 11:03:16.894035 8536 solver.cpp:252] Train net output #0: loss = 1.63709 (* 1 = 1.63709 loss)
I1022 11:03:16.894042 8536 sgd_solver.cpp:106] Iteration 9100, lr = 0.001
I1022 11:03:27.532582 8536 solver.cpp:236] Iteration 9200, loss = 1.81429
I1022 11:03:27.532681 8536 solver.cpp:252] Train net output #0: loss = 1.81429 (* 1 = 1.81429 loss)
I1022 11:03:27.532687 8536 sgd_solver.cpp:106] Iteration 9200, lr = 0.001
I1022 11:03:38.066777 8536 solver.cpp:236] Iteration 9300, loss = 1.60886
I1022 11:03:38.066812 8536 solver.cpp:252] Train net output #0: loss = 1.60886 (* 1 = 1.60886 loss)
I1022 11:03:38.066818 8536 sgd_solver.cpp:106] Iteration 9300, lr = 0.001
I1022 11:03:48.565462 8536 solver.cpp:236] Iteration 9400, loss = 1.65687
I1022 11:03:48.565500 8536 solver.cpp:252] Train net output #0: loss = 1.65687 (* 1 = 1.65687 loss)
I1022 11:03:48.565505 8536 sgd_solver.cpp:106] Iteration 9400, lr = 0.001
I1022 11:03:59.042943 8536 solver.cpp:236] Iteration 9500, loss = 1.87088
I1022 11:03:59.043051 8536 solver.cpp:252] Train net output #0: loss = 1.87088 (* 1 = 1.87088 loss)
I1022 11:03:59.043056 8536 sgd_solver.cpp:106] Iteration 9500, lr = 0.001
I1022 11:04:09.542875 8536 solver.cpp:236] Iteration 9600, loss = 1.62396
I1022 11:04:09.542901 8536 solver.cpp:252] Train net output #0: loss = 1.62396 (* 1 = 1.62396 loss)
I1022 11:04:09.542906 8536 sgd_solver.cpp:106] Iteration 9600, lr = 0.001
I1022 11:04:20.051120 8536 solver.cpp:236] Iteration 9700, loss = 1.82969
I1022 11:04:20.051158 8536 solver.cpp:252] Train net output #0: loss = 1.82969 (* 1 = 1.82969 loss)
I1022 11:04:20.051164 8536 sgd_solver.cpp:106] Iteration 9700, lr = 0.001
I1022 11:04:30.550485 8536 solver.cpp:236] Iteration 9800, loss = 1.61907
I1022 11:04:30.550571 8536 solver.cpp:252] Train net output #0: loss = 1.61907 (* 1 = 1.61907 loss)
I1022 11:04:30.550576 8536 sgd_solver.cpp:106] Iteration 9800, lr = 0.001
I1022 11:04:40.839673 8536 solver.cpp:236] Iteration 9900, loss = 1.64604
I1022 11:04:40.839700 8536 solver.cpp:252] Train net output #0: loss = 1.64604 (* 1 = 1.64604 loss)
I1022 11:04:40.839706 8536 sgd_solver.cpp:106] Iteration 9900, lr = 0.001
I1022 11:04:51.266861 8536 solver.cpp:340] Iteration 10000, Testing net (#0)
I1022 11:04:56.391234 8536 solver.cpp:408] Test net output #0: accuracy = 0.3516
I1022 11:04:56.391273 8536 solver.cpp:408] Test net output #1: loss = 1.72936 (* 1 = 1.72936 loss)
I1022 11:04:56.461269 8536 solver.cpp:236] Iteration 10000, loss = 1.86576
I1022 11:04:56.461308 8536 solver.cpp:252] Train net output #0: loss = 1.86576 (* 1 = 1.86576 loss)
I1022 11:04:56.461313 8536 sgd_solver.cpp:106] Iteration 10000, lr = 0.001
I1022 11:05:06.995796 8536 solver.cpp:236] Iteration 10100, loss = 1.61587
I1022 11:05:06.995857 8536 solver.cpp:252] Train net output #0: loss = 1.61587 (* 1 = 1.61587 loss)
I1022 11:05:06.995862 8536 sgd_solver.cpp:106] Iteration 10100, lr = 0.001
I1022 11:05:17.510552 8536 solver.cpp:236] Iteration 10200, loss = 1.79931
I1022 11:05:17.510587 8536 solver.cpp:252] Train net output #0: loss = 1.79931 (* 1 = 1.79931 loss)
I1022 11:05:17.510592 8536 sgd_solver.cpp:106] Iteration 10200, lr = 0.001
I1022 11:05:28.022467 8536 solver.cpp:236] Iteration 10300, loss = 1.5957
I1022 11:05:28.022495 8536 solver.cpp:252] Train net output #0: loss = 1.5957 (* 1 = 1.5957 loss)
I1022 11:05:28.022500 8536 sgd_solver.cpp:106] Iteration 10300, lr = 0.001
I1022 11:05:38.533931 8536 solver.cpp:236] Iteration 10400, loss = 1.6241
I1022 11:05:38.534000 8536 solver.cpp:252] Train net output #0: loss = 1.6241 (* 1 = 1.6241 loss)
I1022 11:05:38.534006 8536 sgd_solver.cpp:106] Iteration 10400, lr = 0.001
I1022 11:05:49.132987 8536 solver.cpp:236] Iteration 10500, loss = 1.84335
I1022 11:05:49.133025 8536 solver.cpp:252] Train net output #0: loss = 1.84335 (* 1 = 1.84335 loss)
I1022 11:05:49.133030 8536 sgd_solver.cpp:106] Iteration 10500, lr = 0.001
I1022 11:05:59.760514 8536 solver.cpp:236] Iteration 10600, loss = 1.59885
I1022 11:05:59.760542 8536 solver.cpp:252] Train net output #0: loss = 1.59885 (* 1 = 1.59885 loss)
I1022 11:05:59.760547 8536 sgd_solver.cpp:106] Iteration 10600, lr = 0.001
I1022 11:06:10.231766 8536 solver.cpp:236] Iteration 10700, loss = 1.8319
I1022 11:06:10.231876 8536 solver.cpp:252] Train net output #0: loss = 1.8319 (* 1 = 1.8319 loss)
I1022 11:06:10.231884 8536 sgd_solver.cpp:106] Iteration 10700, lr = 0.001
I1022 11:06:20.699128 8536 solver.cpp:236] Iteration 10800, loss = 1.57596
I1022 11:06:20.699163 8536 solver.cpp:252] Train net output #0: loss = 1.57596 (* 1 = 1.57596 loss)
I1022 11:06:20.699168 8536 sgd_solver.cpp:106] Iteration 10800, lr = 0.001
I1022 11:06:31.177182 8536 solver.cpp:236] Iteration 10900, loss = 1.61976
I1022 11:06:31.177217 8536 solver.cpp:252] Train net output #0: loss = 1.61976 (* 1 = 1.61976 loss)
I1022 11:06:31.177222 8536 sgd_solver.cpp:106] Iteration 10900, lr = 0.001
I1022 11:06:41.333619 8536 solver.cpp:340] Iteration 11000, Testing net (#0)
I1022 11:06:46.432960 8536 solver.cpp:408] Test net output #0: accuracy = 0.3564
I1022 11:06:46.432986 8536 solver.cpp:408] Test net output #1: loss = 1.71685 (* 1 = 1.71685 loss)
I1022 11:06:46.501219 8536 solver.cpp:236] Iteration 11000, loss = 1.85086
I1022 11:06:46.501255 8536 solver.cpp:252] Train net output #0: loss = 1.85086 (* 1 = 1.85086 loss)
I1022 11:06:46.501260 8536 sgd_solver.cpp:106] Iteration 11000, lr = 0.001
I1022 11:06:57.333284 8536 solver.cpp:236] Iteration 11100, loss = 1.60589
I1022 11:06:57.333320 8536 solver.cpp:252] Train net output #0: loss = 1.60589 (* 1 = 1.60589 loss)
I1022 11:06:57.333325 8536 sgd_solver.cpp:106] Iteration 11100, lr = 0.001
I1022 11:07:08.047196 8536 solver.cpp:236] Iteration 11200, loss = 1.83182
I1022 11:07:08.047233 8536 solver.cpp:252] Train net output #0: loss = 1.83182 (* 1 = 1.83182 loss)
I1022 11:07:08.047240 8536 sgd_solver.cpp:106] Iteration 11200, lr = 0.001
I1022 11:07:18.816196 8536 solver.cpp:236] Iteration 11300, loss = 1.61551
I1022 11:07:18.816288 8536 solver.cpp:252] Train net output #0: loss = 1.61551 (* 1 = 1.61551 loss)
I1022 11:07:18.816294 8536 sgd_solver.cpp:106] Iteration 11300, lr = 0.001
I1022 11:07:29.498409 8536 solver.cpp:236] Iteration 11400, loss = 1.59523
I1022 11:07:29.498445 8536 solver.cpp:252] Train net output #0: loss = 1.59523 (* 1 = 1.59523 loss)
I1022 11:07:29.498451 8536 sgd_solver.cpp:106] Iteration 11400, lr = 0.001
I1022 11:07:40.003278 8536 solver.cpp:236] Iteration 11500, loss = 1.84488
I1022 11:07:40.003315 8536 solver.cpp:252] Train net output #0: loss = 1.84488 (* 1 = 1.84488 loss)
I1022 11:07:40.003320 8536 sgd_solver.cpp:106] Iteration 11500, lr = 0.001
I1022 11:07:50.503192 8536 solver.cpp:236] Iteration 11600, loss = 1.58921
I1022 11:07:50.503299 8536 solver.cpp:252] Train net output #0: loss = 1.58921 (* 1 = 1.58921 loss)
I1022 11:07:50.503306 8536 sgd_solver.cpp:106] Iteration 11600, lr = 0.001
I1022 11:08:00.999313 8536 solver.cpp:236] Iteration 11700, loss = 1.79409
I1022 11:08:00.999351 8536 solver.cpp:252] Train net output #0: loss = 1.79409 (* 1 = 1.79409 loss)
I1022 11:08:00.999357 8536 sgd_solver.cpp:106] Iteration 11700, lr = 0.001
I1022 11:08:11.489598 8536 solver.cpp:236] Iteration 11800, loss = 1.60815
I1022 11:08:11.489636 8536 solver.cpp:252] Train net output #0: loss = 1.60815 (* 1 = 1.60815 loss)
I1022 11:08:11.489641 8536 sgd_solver.cpp:106] Iteration 11800, lr = 0.001
I1022 11:08:21.969847 8536 solver.cpp:236] Iteration 11900, loss = 1.59865
I1022 11:08:21.969949 8536 solver.cpp:252] Train net output #0: loss = 1.59865 (* 1 = 1.59865 loss)
I1022 11:08:21.969955 8536 sgd_solver.cpp:106] Iteration 11900, lr = 0.001
I1022 11:08:32.275290 8536 solver.cpp:340] Iteration 12000, Testing net (#0)
I1022 11:08:37.402657 8536 solver.cpp:408] Test net output #0: accuracy = 0.3667
I1022 11:08:37.402709 8536 solver.cpp:408] Test net output #1: loss = 1.69912 (* 1 = 1.69912 loss)
I1022 11:08:37.464241 8536 solver.cpp:236] Iteration 12000, loss = 1.84505
I1022 11:08:37.464270 8536 solver.cpp:252] Train net output #0: loss = 1.84505 (* 1 = 1.84505 loss)
I1022 11:08:37.464277 8536 sgd_solver.cpp:106] Iteration 12000, lr = 0.001
I1022 11:08:48.033339 8536 solver.cpp:236] Iteration 12100, loss = 1.59986
I1022 11:08:48.033368 8536 solver.cpp:252] Train net output #0: loss = 1.59986 (* 1 = 1.59986 loss)
I1022 11:08:48.033373 8536 sgd_solver.cpp:106] Iteration 12100, lr = 0.001
I1022 11:08:58.613881 8536 solver.cpp:236] Iteration 12200, loss = 1.80453
I1022 11:08:58.613993 8536 solver.cpp:252] Train net output #0: loss = 1.80453 (* 1 = 1.80453 loss)
I1022 11:08:58.614008 8536 sgd_solver.cpp:106] Iteration 12200, lr = 0.001
I1022 11:09:09.184016 8536 solver.cpp:236] Iteration 12300, loss = 1.5932
I1022 11:09:09.184044 8536 solver.cpp:252] Train net output #0: loss = 1.5932 (* 1 = 1.5932 loss)
I1022 11:09:09.184049 8536 sgd_solver.cpp:106] Iteration 12300, lr = 0.001
I1022 11:09:19.737473 8536 solver.cpp:236] Iteration 12400, loss = 1.592
I1022 11:09:19.737510 8536 solver.cpp:252] Train net output #0: loss = 1.592 (* 1 = 1.592 loss)
I1022 11:09:19.737515 8536 sgd_solver.cpp:106] Iteration 12400, lr = 0.001
I1022 11:09:30.287920 8536 solver.cpp:236] Iteration 12500, loss = 1.83297
I1022 11:09:30.288010 8536 solver.cpp:252] Train net output #0: loss = 1.83297 (* 1 = 1.83297 loss)
I1022 11:09:30.288017 8536 sgd_solver.cpp:106] Iteration 12500, lr = 0.001
I1022 11:09:40.788943 8536 solver.cpp:236] Iteration 12600, loss = 1.58538
I1022 11:09:40.788976 8536 solver.cpp:252] Train net output #0: loss = 1.58538 (* 1 = 1.58538 loss)
I1022 11:09:40.788981 8536 sgd_solver.cpp:106] Iteration 12600, lr = 0.001
I1022 11:09:51.276540 8536 solver.cpp:236] Iteration 12700, loss = 1.75173
I1022 11:09:51.276576 8536 solver.cpp:252] Train net output #0: loss = 1.75173 (* 1 = 1.75173 loss)
I1022 11:09:51.276581 8536 sgd_solver.cpp:106] Iteration 12700, lr = 0.001
I1022 11:10:01.787467 8536 solver.cpp:236] Iteration 12800, loss = 1.58274
I1022 11:10:01.787518 8536 solver.cpp:252] Train net output #0: loss = 1.58274 (* 1 = 1.58274 loss)
I1022 11:10:01.787523 8536 sgd_solver.cpp:106] Iteration 12800, lr = 0.001
I1022 11:10:12.301200 8536 solver.cpp:236] Iteration 12900, loss = 1.56434
I1022 11:10:12.301235 8536 solver.cpp:252] Train net output #0: loss = 1.56434 (* 1 = 1.56434 loss)
I1022 11:10:12.301240 8536 sgd_solver.cpp:106] Iteration 12900, lr = 0.001
I1022 11:10:22.702301 8536 solver.cpp:340] Iteration 13000, Testing net (#0)
I1022 11:10:27.768751 8536 solver.cpp:408] Test net output #0: accuracy = 0.3836
I1022 11:10:27.768797 8536 solver.cpp:408] Test net output #1: loss = 1.67065 (* 1 = 1.67065 loss)
I1022 11:10:27.828917 8536 solver.cpp:236] Iteration 13000, loss = 1.785
I1022 11:10:27.828958 8536 solver.cpp:252] Train net output #0: loss = 1.785 (* 1 = 1.785 loss)
I1022 11:10:27.828971 8536 sgd_solver.cpp:106] Iteration 13000, lr = 0.001
I1022 11:10:38.977177 8536 solver.cpp:236] Iteration 13100, loss = 1.55043
I1022 11:10:38.977267 8536 solver.cpp:252] Train net output #0: loss = 1.55043 (* 1 = 1.55043 loss)
I1022 11:10:38.977272 8536 sgd_solver.cpp:106] Iteration 13100, lr = 0.001
I1022 11:10:49.519579 8536 solver.cpp:236] Iteration 13200, loss = 1.75727
I1022 11:10:49.519615 8536 solver.cpp:252] Train net output #0: loss = 1.75727 (* 1 = 1.75727 loss)
I1022 11:10:49.519621 8536 sgd_solver.cpp:106] Iteration 13200, lr = 0.001
I1022 11:10:59.970903 8536 solver.cpp:236] Iteration 13300, loss = 1.55308
I1022 11:10:59.970929 8536 solver.cpp:252] Train net output #0: loss = 1.55308 (* 1 = 1.55308 loss)
I1022 11:10:59.970935 8536 sgd_solver.cpp:106] Iteration 13300, lr = 0.001
I1022 11:11:10.417140 8536 solver.cpp:236] Iteration 13400, loss = 1.53396
I1022 11:11:10.417229 8536 solver.cpp:252] Train net output #0: loss = 1.53396 (* 1 = 1.53396 loss)
I1022 11:11:10.417244 8536 sgd_solver.cpp:106] Iteration 13400, lr = 0.001
I1022 11:11:20.927251 8536 solver.cpp:236] Iteration 13500, loss = 1.77538
I1022 11:11:20.927286 8536 solver.cpp:252] Train net output #0: loss = 1.77538 (* 1 = 1.77538 loss)
I1022 11:11:20.927292 8536 sgd_solver.cpp:106] Iteration 13500, lr = 0.001
I1022 11:11:31.486376 8536 solver.cpp:236] Iteration 13600, loss = 1.52669
I1022 11:11:31.486413 8536 solver.cpp:252] Train net output #0: loss = 1.52669 (* 1 = 1.52669 loss)
I1022 11:11:31.486420 8536 sgd_solver.cpp:106] Iteration 13600, lr = 0.001
I1022 11:11:42.005573 8536 solver.cpp:236] Iteration 13700, loss = 1.77915
I1022 11:11:42.005667 8536 solver.cpp:252] Train net output #0: loss = 1.77915 (* 1 = 1.77915 loss)
I1022 11:11:42.005672 8536 sgd_solver.cpp:106] Iteration 13700, lr = 0.001
I1022 11:11:52.551605 8536 solver.cpp:236] Iteration 13800, loss = 1.53946
I1022 11:11:52.551642 8536 solver.cpp:252] Train net output #0: loss = 1.53946 (* 1 = 1.53946 loss)
I1022 11:11:52.551650 8536 sgd_solver.cpp:106] Iteration 13800, lr = 0.001
I1022 11:12:03.124747 8536 solver.cpp:236] Iteration 13900, loss = 1.52055
I1022 11:12:03.124783 8536 solver.cpp:252] Train net output #0: loss = 1.52055 (* 1 = 1.52055 loss)
I1022 11:12:03.124789 8536 sgd_solver.cpp:106] Iteration 13900, lr = 0.001
I1022 11:12:13.656524 8536 solver.cpp:340] Iteration 14000, Testing net (#0)
I1022 11:12:18.744698 8536 solver.cpp:408] Test net output #0: accuracy = 0.3958
I1022 11:12:18.744724 8536 solver.cpp:408] Test net output #1: loss = 1.64517 (* 1 = 1.64517 loss)
I1022 11:12:18.813886 8536 solver.cpp:236] Iteration 14000, loss = 1.7507
I1022 11:12:18.813925 8536 solver.cpp:252] Train net output #0: loss = 1.7507 (* 1 = 1.7507 loss)
I1022 11:12:18.813930 8536 sgd_solver.cpp:106] Iteration 14000, lr = 0.001
I1022 11:12:29.110478 8536 solver.cpp:236] Iteration 14100, loss = 1.50475
I1022 11:12:29.110515 8536 solver.cpp:252] Train net output #0: loss = 1.50475 (* 1 = 1.50475 loss)
I1022 11:12:29.110520 8536 sgd_solver.cpp:106] Iteration 14100, lr = 0.001
I1022 11:12:39.656483 8536 solver.cpp:236] Iteration 14200, loss = 1.72815
I1022 11:12:39.656520 8536 solver.cpp:252] Train net output #0: loss = 1.72815 (* 1 = 1.72815 loss)
I1022 11:12:39.656525 8536 sgd_solver.cpp:106] Iteration 14200, lr = 0.001
I1022 11:12:50.243865 8536 solver.cpp:236] Iteration 14300, loss = 1.54549
I1022 11:12:50.243968 8536 solver.cpp:252] Train net output #0: loss = 1.54549 (* 1 = 1.54549 loss)
I1022 11:12:50.243973 8536 sgd_solver.cpp:106] Iteration 14300, lr = 0.001
I1022 11:13:00.794697 8536 solver.cpp:236] Iteration 14400, loss = 1.49902
I1022 11:13:00.794734 8536 solver.cpp:252] Train net output #0: loss = 1.49902 (* 1 = 1.49902 loss)
I1022 11:13:00.794739 8536 sgd_solver.cpp:106] Iteration 14400, lr = 0.001
I1022 11:13:11.538136 8536 solver.cpp:236] Iteration 14500, loss = 1.73908
I1022 11:13:11.538175 8536 solver.cpp:252] Train net output #0: loss = 1.73908 (* 1 = 1.73908 loss)
I1022 11:13:11.538180 8536 sgd_solver.cpp:106] Iteration 14500, lr = 0.001
I1022 11:13:22.039759 8536 solver.cpp:236] Iteration 14600, loss = 1.52042
I1022 11:13:22.039891 8536 solver.cpp:252] Train net output #0: loss = 1.52042 (* 1 = 1.52042 loss)
I1022 11:13:22.039897 8536 sgd_solver.cpp:106] Iteration 14600, lr = 0.001
I1022 11:13:32.508842 8536 solver.cpp:236] Iteration 14700, loss = 1.72301
I1022 11:13:32.508872 8536 solver.cpp:252] Train net output #0: loss = 1.72301 (* 1 = 1.72301 loss)
I1022 11:13:32.508877 8536 sgd_solver.cpp:106] Iteration 14700, lr = 0.001
I1022 11:13:42.999177 8536 solver.cpp:236] Iteration 14800, loss = 1.54298
I1022 11:13:42.999207 8536 solver.cpp:252] Train net output #0: loss = 1.54298 (* 1 = 1.54298 loss)
I1022 11:13:42.999212 8536 sgd_solver.cpp:106] Iteration 14800, lr = 0.001
I1022 11:13:53.459342 8536 solver.cpp:236] Iteration 14900, loss = 1.50204
I1022 11:13:53.459427 8536 solver.cpp:252] Train net output #0: loss = 1.50204 (* 1 = 1.50204 loss)
I1022 11:13:53.459434 8536 sgd_solver.cpp:106] Iteration 14900, lr = 0.001
I1022 11:14:03.834810 8536 solver.cpp:340] Iteration 15000, Testing net (#0)
I1022 11:14:09.060551 8536 solver.cpp:408] Test net output #0: accuracy = 0.4042
I1022 11:14:09.060587 8536 solver.cpp:408] Test net output #1: loss = 1.63019 (* 1 = 1.63019 loss)
I1022 11:14:09.127522 8536 solver.cpp:236] Iteration 15000, loss = 1.73408
I1022 11:14:09.127557 8536 solver.cpp:252] Train net output #0: loss = 1.73408 (* 1 = 1.73408 loss)
I1022 11:14:09.127563 8536 sgd_solver.cpp:106] Iteration 15000, lr = 0.001
I1022 11:14:19.661502 8536 solver.cpp:236] Iteration 15100, loss = 1.50655
I1022 11:14:19.661540 8536 solver.cpp:252] Train net output #0: loss = 1.50655 (* 1 = 1.50655 loss)
I1022 11:14:19.661545 8536 sgd_solver.cpp:106] Iteration 15100, lr = 0.001
I1022 11:14:29.982547 8536 solver.cpp:236] Iteration 15200, loss = 1.70413
I1022 11:14:29.982632 8536 solver.cpp:252] Train net output #0: loss = 1.70413 (* 1 = 1.70413 loss)
I1022 11:14:29.982638 8536 sgd_solver.cpp:106] Iteration 15200, lr = 0.001
I1022 11:14:40.694537 8536 solver.cpp:236] Iteration 15300, loss = 1.5455
I1022 11:14:40.694574 8536 solver.cpp:252] Train net output #0: loss = 1.5455 (* 1 = 1.5455 loss)
I1022 11:14:40.694581 8536 sgd_solver.cpp:106] Iteration 15300, lr = 0.001
I1022 11:14:51.381824 8536 solver.cpp:236] Iteration 15400, loss = 1.48214
I1022 11:14:51.381875 8536 solver.cpp:252] Train net output #0: loss = 1.48214 (* 1 = 1.48214 loss)
I1022 11:14:51.381886 8536 sgd_solver.cpp:106] Iteration 15400, lr = 0.001
I1022 11:15:01.925511 8536 solver.cpp:236] Iteration 15500, loss = 1.72135
I1022 11:15:01.925597 8536 solver.cpp:252] Train net output #0: loss = 1.72135 (* 1 = 1.72135 loss)
I1022 11:15:01.925604 8536 sgd_solver.cpp:106] Iteration 15500, lr = 0.001
I1022 11:15:12.441215 8536 solver.cpp:236] Iteration 15600, loss = 1.50004
I1022 11:15:12.441249 8536 solver.cpp:252] Train net output #0: loss = 1.50004 (* 1 = 1.50004 loss)
I1022 11:15:12.441256 8536 sgd_solver.cpp:106] Iteration 15600, lr = 0.001
I1022 11:15:22.982206 8536 solver.cpp:236] Iteration 15700, loss = 1.69016
I1022 11:15:22.982230 8536 solver.cpp:252] Train net output #0: loss = 1.69016 (* 1 = 1.69016 loss)
I1022 11:15:22.982236 8536 sgd_solver.cpp:106] Iteration 15700, lr = 0.001
I1022 11:15:33.495342 8536 solver.cpp:236] Iteration 15800, loss = 1.51237
I1022 11:15:33.495446 8536 solver.cpp:252] Train net output #0: loss = 1.51237 (* 1 = 1.51237 loss)
I1022 11:15:33.495461 8536 sgd_solver.cpp:106] Iteration 15800, lr = 0.001
I1022 11:15:44.020524 8536 solver.cpp:236] Iteration 15900, loss = 1.46484
I1022 11:15:44.020562 8536 solver.cpp:252] Train net output #0: loss = 1.46484 (* 1 = 1.46484 loss)
I1022 11:15:44.020567 8536 sgd_solver.cpp:106] Iteration 15900, lr = 0.001
I1022 11:15:54.478747 8536 solver.cpp:340] Iteration 16000, Testing net (#0)
I1022 11:15:59.627290 8536 solver.cpp:408] Test net output #0: accuracy = 0.4152
I1022 11:15:59.627323 8536 solver.cpp:408] Test net output #1: loss = 1.61079 (* 1 = 1.61079 loss)
I1022 11:15:59.693795 8536 solver.cpp:236] Iteration 16000, loss = 1.70227
I1022 11:15:59.693831 8536 solver.cpp:252] Train net output #0: loss = 1.70227 (* 1 = 1.70227 loss)
I1022 11:15:59.693838 8536 sgd_solver.cpp:106] Iteration 16000, lr = 0.001
I1022 11:16:10.220052 8536 solver.cpp:236] Iteration 16100, loss = 1.48295
I1022 11:16:10.220165 8536 solver.cpp:252] Train net output #0: loss = 1.48295 (* 1 = 1.48295 loss)
I1022 11:16:10.220170 8536 sgd_solver.cpp:106] Iteration 16100, lr = 0.001
I1022 11:16:20.602689 8536 solver.cpp:236] Iteration 16200, loss = 1.67739
I1022 11:16:20.602717 8536 solver.cpp:252] Train net output #0: loss = 1.67739 (* 1 = 1.67739 loss)
I1022 11:16:20.602725 8536 sgd_solver.cpp:106] Iteration 16200, lr = 0.001
I1022 11:16:31.125738 8536 solver.cpp:236] Iteration 16300, loss = 1.52506
I1022 11:16:31.125776 8536 solver.cpp:252] Train net output #0: loss = 1.52506 (* 1 = 1.52506 loss)
I1022 11:16:31.125780 8536 sgd_solver.cpp:106] Iteration 16300, lr = 0.001
I1022 11:16:41.658015 8536 solver.cpp:236] Iteration 16400, loss = 1.44382
I1022 11:16:41.658126 8536 solver.cpp:252] Train net output #0: loss = 1.44382 (* 1 = 1.44382 loss)
I1022 11:16:41.658141 8536 sgd_solver.cpp:106] Iteration 16400, lr = 0.001
I1022 11:16:52.156821 8536 solver.cpp:236] Iteration 16500, loss = 1.70314
I1022 11:16:52.156857 8536 solver.cpp:252] Train net output #0: loss = 1.70314 (* 1 = 1.70314 loss)
I1022 11:16:52.156862 8536 sgd_solver.cpp:106] Iteration 16500, lr = 0.001
I1022 11:17:04.051683 8536 solver.cpp:236] Iteration 16600, loss = 1.45752
I1022 11:17:04.051722 8536 solver.cpp:252] Train net output #0: loss = 1.45752 (* 1 = 1.45752 loss)
I1022 11:17:04.051728 8536 sgd_solver.cpp:106] Iteration 16600, lr = 0.001
I1022 11:17:19.876646 8536 solver.cpp:236] Iteration 16700, loss = 1.67541
I1022 11:17:19.876736 8536 solver.cpp:252] Train net output #0: loss = 1.67541 (* 1 = 1.67541 loss)
I1022 11:17:19.876749 8536 sgd_solver.cpp:106] Iteration 16700, lr = 0.001
I1022 11:17:35.541496 8536 solver.cpp:236] Iteration 16800, loss = 1.50925
I1022 11:17:35.541533 8536 solver.cpp:252] Train net output #0: loss = 1.50925 (* 1 = 1.50925 loss)
I1022 11:17:35.541538 8536 sgd_solver.cpp:106] Iteration 16800, lr = 0.001
I1022 11:17:51.122148 8536 solver.cpp:236] Iteration 16900, loss = 1.45147
I1022 11:17:51.122239 8536 solver.cpp:252] Train net output #0: loss = 1.45147 (* 1 = 1.45147 loss)
I1022 11:17:51.122246 8536 sgd_solver.cpp:106] Iteration 16900, lr = 0.001
I1022 11:18:06.762167 8536 solver.cpp:340] Iteration 17000, Testing net (#0)
I1022 11:18:14.622251 8536 solver.cpp:408] Test net output #0: accuracy = 0.4195
I1022 11:18:14.622300 8536 solver.cpp:408] Test net output #1: loss = 1.5997 (* 1 = 1.5997 loss)
I1022 11:18:14.727629 8536 solver.cpp:236] Iteration 17000, loss = 1.69983
I1022 11:18:14.727658 8536 solver.cpp:252] Train net output #0: loss = 1.69983 (* 1 = 1.69983 loss)
I1022 11:18:14.727663 8536 sgd_solver.cpp:106] Iteration 17000, lr = 0.001
I1022 11:18:30.554291 8536 solver.cpp:236] Iteration 17100, loss = 1.48101
I1022 11:18:30.554368 8536 solver.cpp:252] Train net output #0: loss = 1.48101 (* 1 = 1.48101 loss)
I1022 11:18:30.554379 8536 sgd_solver.cpp:106] Iteration 17100, lr = 0.001
I1022 11:18:46.366008 8536 solver.cpp:236] Iteration 17200, loss = 1.66837
I1022 11:18:46.366046 8536 solver.cpp:252] Train net output #0: loss = 1.66837 (* 1 = 1.66837 loss)
I1022 11:18:46.366051 8536 sgd_solver.cpp:106] Iteration 17200, lr = 0.001
I1022 11:19:01.853924 8536 solver.cpp:236] Iteration 17300, loss = 1.5098
I1022 11:19:01.854136 8536 solver.cpp:252] Train net output #0: loss = 1.5098 (* 1 = 1.5098 loss)
I1022 11:19:01.854148 8536 sgd_solver.cpp:106] Iteration 17300, lr = 0.001
I1022 11:19:17.490357 8536 solver.cpp:236] Iteration 17400, loss = 1.43694
I1022 11:19:17.490387 8536 solver.cpp:252] Train net output #0: loss = 1.43694 (* 1 = 1.43694 loss)
I1022 11:19:17.490394 8536 sgd_solver.cpp:106] Iteration 17400, lr = 0.001
I1022 11:19:33.085923 8536 solver.cpp:236] Iteration 17500, loss = 1.69321
I1022 11:19:33.086015 8536 solver.cpp:252] Train net output #0: loss = 1.69321 (* 1 = 1.69321 loss)
I1022 11:19:33.086021 8536 sgd_solver.cpp:106] Iteration 17500, lr = 0.001
I1022 11:19:48.616554 8536 solver.cpp:236] Iteration 17600, loss = 1.47778
I1022 11:19:48.616591 8536 solver.cpp:252] Train net output #0: loss = 1.47778 (* 1 = 1.47778 loss)
I1022 11:19:48.616598 8536 sgd_solver.cpp:106] Iteration 17600, lr = 0.001
I1022 11:20:04.112418 8536 solver.cpp:236] Iteration 17700, loss = 1.66035
I1022 11:20:04.112510 8536 solver.cpp:252] Train net output #0: loss = 1.66035 (* 1 = 1.66035 loss)
I1022 11:20:04.112516 8536 sgd_solver.cpp:106] Iteration 17700, lr = 0.001
I1022 11:20:19.785087 8536 solver.cpp:236] Iteration 17800, loss = 1.48203
I1022 11:20:19.785143 8536 solver.cpp:252] Train net output #0: loss = 1.48203 (* 1 = 1.48203 loss)
I1022 11:20:19.785154 8536 sgd_solver.cpp:106] Iteration 17800, lr = 0.001
I1022 11:20:35.435070 8536 solver.cpp:236] Iteration 17900, loss = 1.43121
I1022 11:20:35.435169 8536 solver.cpp:252] Train net output #0: loss = 1.43121 (* 1 = 1.43121 loss)
I1022 11:20:35.435175 8536 sgd_solver.cpp:106] Iteration 17900, lr = 0.001
I1022 11:20:51.206642 8536 solver.cpp:340] Iteration 18000, Testing net (#0)
I1022 11:20:59.075762 8536 solver.cpp:408] Test net output #0: accuracy = 0.4216
I1022 11:20:59.075790 8536 solver.cpp:408] Test net output #1: loss = 1.59268 (* 1 = 1.59268 loss)
I1022 11:20:59.178589 8536 solver.cpp:236] Iteration 18000, loss = 1.68358
I1022 11:20:59.178624 8536 solver.cpp:252] Train net output #0: loss = 1.68358 (* 1 = 1.68358 loss)
I1022 11:20:59.178629 8536 sgd_solver.cpp:106] Iteration 18000, lr = 0.001
I1022 11:21:14.756212 8536 solver.cpp:236] Iteration 18100, loss = 1.47416
I1022 11:21:14.756289 8536 solver.cpp:252] Train net output #0: loss = 1.47416 (* 1 = 1.47416 loss)
I1022 11:21:14.756294 8536 sgd_solver.cpp:106] Iteration 18100, lr = 0.001
I1022 11:21:30.346837 8536 solver.cpp:236] Iteration 18200, loss = 1.66951
I1022 11:21:30.346864 8536 solver.cpp:252] Train net output #0: loss = 1.66951 (* 1 = 1.66951 loss)
I1022 11:21:30.346869 8536 sgd_solver.cpp:106] Iteration 18200, lr = 0.001
I1022 11:21:45.791546 8536 solver.cpp:236] Iteration 18300, loss = 1.47203
I1022 11:21:45.791656 8536 solver.cpp:252] Train net output #0: loss = 1.47203 (* 1 = 1.47203 loss)
I1022 11:21:45.791661 8536 sgd_solver.cpp:106] Iteration 18300, lr = 0.001
I1022 11:22:01.326298 8536 solver.cpp:236] Iteration 18400, loss = 1.41797
I1022 11:22:01.326336 8536 solver.cpp:252] Train net output #0: loss = 1.41797 (* 1 = 1.41797 loss)
I1022 11:22:01.326342 8536 sgd_solver.cpp:106] Iteration 18400, lr = 0.001
I1022 11:22:16.918109 8536 solver.cpp:236] Iteration 18500, loss = 1.68132
I1022 11:22:16.918206 8536 solver.cpp:252] Train net output #0: loss = 1.68132 (* 1 = 1.68132 loss)
I1022 11:22:16.918218 8536 sgd_solver.cpp:106] Iteration 18500, lr = 0.001
I1022 11:22:32.644271 8536 solver.cpp:236] Iteration 18600, loss = 1.48547
I1022 11:22:32.644326 8536 solver.cpp:252] Train net output #0: loss = 1.48547 (* 1 = 1.48547 loss)
I1022 11:22:32.644338 8536 sgd_solver.cpp:106] Iteration 18600, lr = 0.001
I1022 11:22:48.072569 8536 solver.cpp:236] Iteration 18700, loss = 1.65187
I1022 11:22:48.072660 8536 solver.cpp:252] Train net output #0: loss = 1.65187 (* 1 = 1.65187 loss)
I1022 11:22:48.072665 8536 sgd_solver.cpp:106] Iteration 18700, lr = 0.001
I1022 11:23:03.644287 8536 solver.cpp:236] Iteration 18800, loss = 1.46749
I1022 11:23:03.644325 8536 solver.cpp:252] Train net output #0: loss = 1.46749 (* 1 = 1.46749 loss)
I1022 11:23:03.644330 8536 sgd_solver.cpp:106] Iteration 18800, lr = 0.001
I1022 11:23:19.428458 8536 solver.cpp:236] Iteration 18900, loss = 1.40564
I1022 11:23:19.428541 8536 solver.cpp:252] Train net output #0: loss = 1.40564 (* 1 = 1.40564 loss)
I1022 11:23:19.428552 8536 sgd_solver.cpp:106] Iteration 18900, lr = 0.001
I1022 11:23:35.089431 8536 solver.cpp:340] Iteration 19000, Testing net (#0)
I1022 11:23:42.964900 8536 solver.cpp:408] Test net output #0: accuracy = 0.4217
I1022 11:23:42.964949 8536 solver.cpp:408] Test net output #1: loss = 1.58359 (* 1 = 1.58359 loss)
I1022 11:23:43.056345 8536 solver.cpp:236] Iteration 19000, loss = 1.68481
I1022 11:23:43.056387 8536 solver.cpp:252] Train net output #0: loss = 1.68481 (* 1 = 1.68481 loss)
I1022 11:23:43.056393 8536 sgd_solver.cpp:106] Iteration 19000, lr = 0.001
I1022 11:23:58.878793 8536 solver.cpp:236] Iteration 19100, loss = 1.48403
I1022 11:23:58.878886 8536 solver.cpp:252] Train net output #0: loss = 1.48403 (* 1 = 1.48403 loss)
I1022 11:23:58.878895 8536 sgd_solver.cpp:106] Iteration 19100, lr = 0.001
I1022 11:24:14.850080 8536 solver.cpp:236] Iteration 19200, loss = 1.62435
I1022 11:24:14.850108 8536 solver.cpp:252] Train net output #0: loss = 1.62435 (* 1 = 1.62435 loss)
I1022 11:24:14.850114 8536 sgd_solver.cpp:106] Iteration 19200, lr = 0.001
I1022 11:24:30.725255 8536 solver.cpp:236] Iteration 19300, loss = 1.46326
I1022 11:24:30.725335 8536 solver.cpp:252] Train net output #0: loss = 1.46326 (* 1 = 1.46326 loss)
I1022 11:24:30.725342 8536 sgd_solver.cpp:106] Iteration 19300, lr = 0.001
I1022 11:24:46.118641 8536 solver.cpp:236] Iteration 19400, loss = 1.39482
I1022 11:24:46.118669 8536 solver.cpp:252] Train net output #0: loss = 1.39482 (* 1 = 1.39482 loss)
I1022 11:24:46.118674 8536 sgd_solver.cpp:106] Iteration 19400, lr = 0.001
I1022 11:24:58.404811 8536 solver.cpp:236] Iteration 19500, loss = 1.67783
I1022 11:24:58.404847 8536 solver.cpp:252] Train net output #0: loss = 1.67783 (* 1 = 1.67783 loss)
I1022 11:24:58.404853 8536 sgd_solver.cpp:106] Iteration 19500, lr = 0.001
I1022 11:25:09.095026 8536 solver.cpp:236] Iteration 19600, loss = 1.4618
I1022 11:25:09.095100 8536 solver.cpp:252] Train net output #0: loss = 1.4618 (* 1 = 1.4618 loss)
I1022 11:25:09.095106 8536 sgd_solver.cpp:106] Iteration 19600, lr = 0.001
I1022 11:25:19.825944 8536 solver.cpp:236] Iteration 19700, loss = 1.65221
I1022 11:25:19.825980 8536 solver.cpp:252] Train net output #0: loss = 1.65221 (* 1 = 1.65221 loss)
I1022 11:25:19.825986 8536 sgd_solver.cpp:106] Iteration 19700, lr = 0.001
I1022 11:25:30.354027 8536 solver.cpp:236] Iteration 19800, loss = 1.47043
I1022 11:25:30.354063 8536 solver.cpp:252] Train net output #0: loss = 1.47043 (* 1 = 1.47043 loss)
I1022 11:25:30.354068 8536 sgd_solver.cpp:106] Iteration 19800, lr = 0.001
I1022 11:25:40.996464 8536 solver.cpp:236] Iteration 19900, loss = 1.4104
I1022 11:25:40.996557 8536 solver.cpp:252] Train net output #0: loss = 1.4104 (* 1 = 1.4104 loss)
I1022 11:25:40.996569 8536 sgd_solver.cpp:106] Iteration 19900, lr = 0.001
I1022 11:25:51.598611 8536 solver.cpp:340] Iteration 20000, Testing net (#0)
I1022 11:25:56.685449 8536 solver.cpp:408] Test net output #0: accuracy = 0.4218
I1022 11:25:56.685497 8536 solver.cpp:408] Test net output #1: loss = 1.57866 (* 1 = 1.57866 loss)
I1022 11:25:56.754575 8536 solver.cpp:236] Iteration 20000, loss = 1.68525
I1022 11:25:56.754611 8536 solver.cpp:252] Train net output #0: loss = 1.68525 (* 1 = 1.68525 loss)
I1022 11:25:56.754616 8536 sgd_solver.cpp:106] Iteration 20000, lr = 0.001
I1022 11:26:07.276060 8536 solver.cpp:236] Iteration 20100, loss = 1.45834
I1022 11:26:07.276096 8536 solver.cpp:252] Train net output #0: loss = 1.45834 (* 1 = 1.45834 loss)
I1022 11:26:07.276101 8536 sgd_solver.cpp:106] Iteration 20100, lr = 0.001
I1022 11:26:17.806164 8536 solver.cpp:236] Iteration 20200, loss = 1.63545
I1022 11:26:17.806272 8536 solver.cpp:252] Train net output #0: loss = 1.63545 (* 1 = 1.63545 loss)
I1022 11:26:17.806288 8536 sgd_solver.cpp:106] Iteration 20200, lr = 0.001
I1022 11:26:28.304659 8536 solver.cpp:236] Iteration 20300, loss = 1.45305
I1022 11:26:28.304697 8536 solver.cpp:252] Train net output #0: loss = 1.45305 (* 1 = 1.45305 loss)
I1022 11:26:28.304702 8536 sgd_solver.cpp:106] Iteration 20300, lr = 0.001
I1022 11:26:38.699551 8536 solver.cpp:236] Iteration 20400, loss = 1.39302
I1022 11:26:38.699579 8536 solver.cpp:252] Train net output #0: loss = 1.39302 (* 1 = 1.39302 loss)
I1022 11:26:38.699585 8536 sgd_solver.cpp:106] Iteration 20400, lr = 0.001
I1022 11:26:49.082860 8536 solver.cpp:236] Iteration 20500, loss = 1.67335
I1022 11:26:49.082969 8536 solver.cpp:252] Train net output #0: loss = 1.67335 (* 1 = 1.67335 loss)
I1022 11:26:49.082974 8536 sgd_solver.cpp:106] Iteration 20500, lr = 0.001
I1022 11:26:59.632510 8536 solver.cpp:236] Iteration 20600, loss = 1.45115
I1022 11:26:59.632539 8536 solver.cpp:252] Train net output #0: loss = 1.45115 (* 1 = 1.45115 loss)
I1022 11:26:59.632544 8536 sgd_solver.cpp:106] Iteration 20600, lr = 0.001
I1022 11:27:10.128350 8536 solver.cpp:236] Iteration 20700, loss = 1.61431
I1022 11:27:10.128391 8536 solver.cpp:252] Train net output #0: loss = 1.61431 (* 1 = 1.61431 loss)
I1022 11:27:10.128396 8536 sgd_solver.cpp:106] Iteration 20700, lr = 0.001
I1022 11:27:20.647275 8536 solver.cpp:236] Iteration 20800, loss = 1.45903
I1022 11:27:20.647382 8536 solver.cpp:252] Train net output #0: loss = 1.45903 (* 1 = 1.45903 loss)
I1022 11:27:20.647388 8536 sgd_solver.cpp:106] Iteration 20800, lr = 0.001
I1022 11:27:31.165225 8536 solver.cpp:236] Iteration 20900, loss = 1.39597
I1022 11:27:31.165261 8536 solver.cpp:252] Train net output #0: loss = 1.39597 (* 1 = 1.39597 loss)
I1022 11:27:31.165267 8536 sgd_solver.cpp:106] Iteration 20900, lr = 0.001
I1022 11:27:41.585317 8536 solver.cpp:340] Iteration 21000, Testing net (#0)
I1022 11:27:46.819574 8536 solver.cpp:408] Test net output #0: accuracy = 0.4259
I1022 11:27:46.819612 8536 solver.cpp:408] Test net output #1: loss = 1.56778 (* 1 = 1.56778 loss)
I1022 11:27:46.890826 8536 solver.cpp:236] Iteration 21000, loss = 1.6642
I1022 11:27:46.890862 8536 solver.cpp:252] Train net output #0: loss = 1.6642 (* 1 = 1.6642 loss)
I1022 11:27:46.890867 8536 sgd_solver.cpp:106] Iteration 21000, lr = 0.001
I1022 11:27:57.555428 8536 solver.cpp:236] Iteration 21100, loss = 1.44994
I1022 11:27:57.555532 8536 solver.cpp:252] Train net output #0: loss = 1.44994 (* 1 = 1.44994 loss)
I1022 11:27:57.555539 8536 sgd_solver.cpp:106] Iteration 21100, lr = 0.001
I1022 11:28:08.379308 8536 solver.cpp:236] Iteration 21200, loss = 1.60958
I1022 11:28:08.379346 8536 solver.cpp:252] Train net output #0: loss = 1.60958 (* 1 = 1.60958 loss)
I1022 11:28:08.379353 8536 sgd_solver.cpp:106] Iteration 21200, lr = 0.001
I1022 11:28:19.174197 8536 solver.cpp:236] Iteration 21300, loss = 1.45987
I1022 11:28:19.174235 8536 solver.cpp:252] Train net output #0: loss = 1.45987 (* 1 = 1.45987 loss)
I1022 11:28:19.174240 8536 sgd_solver.cpp:106] Iteration 21300, lr = 0.001
I1022 11:28:29.767763 8536 solver.cpp:236] Iteration 21400, loss = 1.39451
I1022 11:28:29.767822 8536 solver.cpp:252] Train net output #0: loss = 1.39451 (* 1 = 1.39451 loss)
I1022 11:28:29.767828 8536 sgd_solver.cpp:106] Iteration 21400, lr = 0.001
I1022 11:28:40.006217 8536 solver.cpp:236] Iteration 21500, loss = 1.66202
I1022 11:28:40.006244 8536 solver.cpp:252] Train net output #0: loss = 1.66202 (* 1 = 1.66202 loss)
I1022 11:28:40.006249 8536 sgd_solver.cpp:106] Iteration 21500, lr = 0.001
I1022 11:28:50.555627 8536 solver.cpp:236] Iteration 21600, loss = 1.434
I1022 11:28:50.555654 8536 solver.cpp:252] Train net output #0: loss = 1.434 (* 1 = 1.434 loss)
I1022 11:28:50.555658 8536 sgd_solver.cpp:106] Iteration 21600, lr = 0.001
I1022 11:29:01.152758 8536 solver.cpp:236] Iteration 21700, loss = 1.58727
I1022 11:29:01.152865 8536 solver.cpp:252] Train net output #0: loss = 1.58727 (* 1 = 1.58727 loss)
I1022 11:29:01.152880 8536 sgd_solver.cpp:106] Iteration 21700, lr = 0.001
I1022 11:29:11.751288 8536 solver.cpp:236] Iteration 21800, loss = 1.43126
I1022 11:29:11.751315 8536 solver.cpp:252] Train net output #0: loss = 1.43126 (* 1 = 1.43126 loss)
I1022 11:29:11.751322 8536 sgd_solver.cpp:106] Iteration 21800, lr = 0.001
I1022 11:29:22.510365 8536 solver.cpp:236] Iteration 21900, loss = 1.37895
I1022 11:29:22.510401 8536 solver.cpp:252] Train net output #0: loss = 1.37895 (* 1 = 1.37895 loss)
I1022 11:29:22.510406 8536 sgd_solver.cpp:106] Iteration 21900, lr = 0.001
I1022 11:29:32.994801 8536 solver.cpp:340] Iteration 22000, Testing net (#0)
I1022 11:29:38.177765 8536 solver.cpp:408] Test net output #0: accuracy = 0.4308
I1022 11:29:38.177800 8536 solver.cpp:408] Test net output #1: loss = 1.55757 (* 1 = 1.55757 loss)
I1022 11:29:38.244222 8536 solver.cpp:236] Iteration 22000, loss = 1.66062
I1022 11:29:38.244257 8536 solver.cpp:252] Train net output #0: loss = 1.66062 (* 1 = 1.66062 loss)
I1022 11:29:38.244262 8536 sgd_solver.cpp:106] Iteration 22000, lr = 0.001
I1022 11:29:49.013748 8536 solver.cpp:236] Iteration 22100, loss = 1.43977
I1022 11:29:49.013784 8536 solver.cpp:252] Train net output #0: loss = 1.43977 (* 1 = 1.43977 loss)
I1022 11:29:49.013792 8536 sgd_solver.cpp:106] Iteration 22100, lr = 0.001
I1022 11:29:59.681447 8536 solver.cpp:236] Iteration 22200, loss = 1.6062
I1022 11:29:59.681485 8536 solver.cpp:252] Train net output #0: loss = 1.6062 (* 1 = 1.6062 loss)
I1022 11:29:59.681490 8536 sgd_solver.cpp:106] Iteration 22200, lr = 0.001
I1022 11:30:10.171463 8536 solver.cpp:236] Iteration 22300, loss = 1.43489
I1022 11:30:10.171563 8536 solver.cpp:252] Train net output #0: loss = 1.43489 (* 1 = 1.43489 loss)
I1022 11:30:10.171569 8536 sgd_solver.cpp:106] Iteration 22300, lr = 0.001
I1022 11:30:20.638829 8536 solver.cpp:236] Iteration 22400, loss = 1.37533
I1022 11:30:20.638866 8536 solver.cpp:252] Train net output #0: loss = 1.37533 (* 1 = 1.37533 loss)
I1022 11:30:20.638871 8536 sgd_solver.cpp:106] Iteration 22400, lr = 0.001
I1022 11:30:31.072971 8536 solver.cpp:236] Iteration 22500, loss = 1.6607
I1022 11:30:31.073007 8536 solver.cpp:252] Train net output #0: loss = 1.6607 (* 1 = 1.6607 loss)
I1022 11:30:31.073014 8536 sgd_solver.cpp:106] Iteration 22500, lr = 0.001
I1022 11:30:41.480780 8536 solver.cpp:236] Iteration 22600, loss = 1.43862
I1022 11:30:41.480878 8536 solver.cpp:252] Train net output #0: loss = 1.43862 (* 1 = 1.43862 loss)
I1022 11:30:41.480883 8536 sgd_solver.cpp:106] Iteration 22600, lr = 0.001
I1022 11:30:51.959641 8536 solver.cpp:236] Iteration 22700, loss = 1.57797
I1022 11:30:51.959671 8536 solver.cpp:252] Train net output #0: loss = 1.57797 (* 1 = 1.57797 loss)
I1022 11:30:51.959676 8536 sgd_solver.cpp:106] Iteration 22700, lr = 0.001
I1022 11:31:02.416637 8536 solver.cpp:236] Iteration 22800, loss = 1.43455
I1022 11:31:02.416674 8536 solver.cpp:252] Train net output #0: loss = 1.43455 (* 1 = 1.43455 loss)
I1022 11:31:02.416681 8536 sgd_solver.cpp:106] Iteration 22800, lr = 0.001
I1022 11:31:12.869185 8536 solver.cpp:236] Iteration 22900, loss = 1.36973
I1022 11:31:12.869288 8536 solver.cpp:252] Train net output #0: loss = 1.36973 (* 1 = 1.36973 loss)
I1022 11:31:12.869294 8536 sgd_solver.cpp:106] Iteration 22900, lr = 0.001
I1022 11:31:23.270099 8536 solver.cpp:340] Iteration 23000, Testing net (#0)
I1022 11:31:28.469693 8536 solver.cpp:408] Test net output #0: accuracy = 0.4314
I1022 11:31:28.469728 8536 solver.cpp:408] Test net output #1: loss = 1.55868 (* 1 = 1.55868 loss)
I1022 11:31:28.536613 8536 solver.cpp:236] Iteration 23000, loss = 1.64803
I1022 11:31:28.536639 8536 solver.cpp:252] Train net output #0: loss = 1.64803 (* 1 = 1.64803 loss)
I1022 11:31:28.536644 8536 sgd_solver.cpp:106] Iteration 23000, lr = 0.001
I1022 11:31:39.238369 8536 solver.cpp:236] Iteration 23100, loss = 1.42929
I1022 11:31:39.238409 8536 solver.cpp:252] Train net output #0: loss = 1.42929 (* 1 = 1.42929 loss)
I1022 11:31:39.238414 8536 sgd_solver.cpp:106] Iteration 23100, lr = 0.001
I1022 11:31:49.765833 8536 solver.cpp:236] Iteration 23200, loss = 1.58734
I1022 11:31:49.765919 8536 solver.cpp:252] Train net output #0: loss = 1.58734 (* 1 = 1.58734 loss)
I1022 11:31:49.765924 8536 sgd_solver.cpp:106] Iteration 23200, lr = 0.001
I1022 11:32:00.358640 8536 solver.cpp:236] Iteration 23300, loss = 1.43082
I1022 11:32:00.358676 8536 solver.cpp:252] Train net output #0: loss = 1.43082 (* 1 = 1.43082 loss)
I1022 11:32:00.358682 8536 sgd_solver.cpp:106] Iteration 23300, lr = 0.001
I1022 11:32:10.823578 8536 solver.cpp:236] Iteration 23400, loss = 1.37993
I1022 11:32:10.823603 8536 solver.cpp:252] Train net output #0: loss = 1.37993 (* 1 = 1.37993 loss)
I1022 11:32:10.823608 8536 sgd_solver.cpp:106] Iteration 23400, lr = 0.001
I1022 11:32:21.264716 8536 solver.cpp:236] Iteration 23500, loss = 1.63362
I1022 11:32:21.264787 8536 solver.cpp:252] Train net output #0: loss = 1.63362 (* 1 = 1.63362 loss)
I1022 11:32:21.264793 8536 sgd_solver.cpp:106] Iteration 23500, lr = 0.001
I1022 11:32:31.528718 8536 solver.cpp:236] Iteration 23600, loss = 1.43366
I1022 11:32:31.528754 8536 solver.cpp:252] Train net output #0: loss = 1.43366 (* 1 = 1.43366 loss)
I1022 11:32:31.528760 8536 sgd_solver.cpp:106] Iteration 23600, lr = 0.001
I1022 11:32:41.933521 8536 solver.cpp:236] Iteration 23700, loss = 1.58674
I1022 11:32:41.933560 8536 solver.cpp:252] Train net output #0: loss = 1.58674 (* 1 = 1.58674 loss)
I1022 11:32:41.933567 8536 sgd_solver.cpp:106] Iteration 23700, lr = 0.001
I1022 11:32:52.656209 8536 solver.cpp:236] Iteration 23800, loss = 1.41597
I1022 11:32:52.656311 8536 solver.cpp:252] Train net output #0: loss = 1.41597 (* 1 = 1.41597 loss)
I1022 11:32:52.656316 8536 sgd_solver.cpp:106] Iteration 23800, lr = 0.001
I1022 11:33:03.166453 8536 solver.cpp:236] Iteration 23900, loss = 1.36292
I1022 11:33:03.166481 8536 solver.cpp:252] Train net output #0: loss = 1.36292 (* 1 = 1.36292 loss)
I1022 11:33:03.166486 8536 sgd_solver.cpp:106] Iteration 23900, lr = 0.001
I1022 11:33:13.531685 8536 solver.cpp:340] Iteration 24000, Testing net (#0)
I1022 11:33:18.598912 8536 solver.cpp:408] Test net output #0: accuracy = 0.431
I1022 11:33:18.598944 8536 solver.cpp:408] Test net output #1: loss = 1.54621 (* 1 = 1.54621 loss)
I1022 11:33:18.665500 8536 solver.cpp:236] Iteration 24000, loss = 1.63982
I1022 11:33:18.665534 8536 solver.cpp:252] Train net output #0: loss = 1.63982 (* 1 = 1.63982 loss)
I1022 11:33:18.665539 8536 sgd_solver.cpp:106] Iteration 24000, lr = 0.001
I1022 11:33:29.116564 8536 solver.cpp:236] Iteration 24100, loss = 1.41556
I1022 11:33:29.116669 8536 solver.cpp:252] Train net output #0: loss = 1.41556 (* 1 = 1.41556 loss)
I1022 11:33:29.116684 8536 sgd_solver.cpp:106] Iteration 24100, lr = 0.001
I1022 11:33:39.566375 8536 solver.cpp:236] Iteration 24200, loss = 1.56235
I1022 11:33:39.566411 8536 solver.cpp:252] Train net output #0: loss = 1.56235 (* 1 = 1.56235 loss)
I1022 11:33:39.566416 8536 sgd_solver.cpp:106] Iteration 24200, lr = 0.001
I1022 11:33:50.017849 8536 solver.cpp:236] Iteration 24300, loss = 1.41285
I1022 11:33:50.017886 8536 solver.cpp:252] Train net output #0: loss = 1.41285 (* 1 = 1.41285 loss)
I1022 11:33:50.017891 8536 sgd_solver.cpp:106] Iteration 24300, lr = 0.001
I1022 11:34:00.466984 8536 solver.cpp:236] Iteration 24400, loss = 1.37083
I1022 11:34:00.467092 8536 solver.cpp:252] Train net output #0: loss = 1.37083 (* 1 = 1.37083 loss)
I1022 11:34:00.467108 8536 sgd_solver.cpp:106] Iteration 24400, lr = 0.001
I1022 11:34:10.928581 8536 solver.cpp:236] Iteration 24500, loss = 1.63523
I1022 11:34:10.928616 8536 solver.cpp:252] Train net output #0: loss = 1.63523 (* 1 = 1.63523 loss)
I1022 11:34:10.928621 8536 sgd_solver.cpp:106] Iteration 24500, lr = 0.001
I1022 11:34:21.387420 8536 solver.cpp:236] Iteration 24600, loss = 1.42009
I1022 11:34:21.387447 8536 solver.cpp:252] Train net output #0: loss = 1.42009 (* 1 = 1.42009 loss)
I1022 11:34:21.387452 8536 sgd_solver.cpp:106] Iteration 24600, lr = 0.001
I1022 11:34:31.623276 8536 solver.cpp:236] Iteration 24700, loss = 1.56551
I1022 11:34:31.623383 8536 solver.cpp:252] Train net output #0: loss = 1.56551 (* 1 = 1.56551 loss)
I1022 11:34:31.623389 8536 sgd_solver.cpp:106] Iteration 24700, lr = 0.001
I1022 11:34:42.077611 8536 solver.cpp:236] Iteration 24800, loss = 1.43489
I1022 11:34:42.077648 8536 solver.cpp:252] Train net output #0: loss = 1.43489 (* 1 = 1.43489 loss)
I1022 11:34:42.077653 8536 sgd_solver.cpp:106] Iteration 24800, lr = 0.001
I1022 11:34:52.528301 8536 solver.cpp:236] Iteration 24900, loss = 1.36453
I1022 11:34:52.528327 8536 solver.cpp:252] Train net output #0: loss = 1.36453 (* 1 = 1.36453 loss)
I1022 11:34:52.528333 8536 sgd_solver.cpp:106] Iteration 24900, lr = 0.001
I1022 11:35:02.889945 8536 solver.cpp:340] Iteration 25000, Testing net (#0)
I1022 11:35:07.998065 8536 solver.cpp:408] Test net output #0: accuracy = 0.4329
I1022 11:35:07.998100 8536 solver.cpp:408] Test net output #1: loss = 1.54336 (* 1 = 1.54336 loss)
I1022 11:35:08.064112 8536 solver.cpp:236] Iteration 25000, loss = 1.62938
I1022 11:35:08.064149 8536 solver.cpp:252] Train net output #0: loss = 1.62938 (* 1 = 1.62938 loss)
I1022 11:35:08.064154 8536 sgd_solver.cpp:106] Iteration 25000, lr = 0.001
I1022 11:35:18.592597 8536 solver.cpp:236] Iteration 25100, loss = 1.41808
I1022 11:35:18.592625 8536 solver.cpp:252] Train net output #0: loss = 1.41808 (* 1 = 1.41808 loss)
I1022 11:35:18.592630 8536 sgd_solver.cpp:106] Iteration 25100, lr = 0.001
I1022 11:35:29.145010 8536 solver.cpp:236] Iteration 25200, loss = 1.55364
I1022 11:35:29.145056 8536 solver.cpp:252] Train net output #0: loss = 1.55364 (* 1 = 1.55364 loss)
I1022 11:35:29.145066 8536 sgd_solver.cpp:106] Iteration 25200, lr = 0.001
I1022 11:35:39.651571 8536 solver.cpp:236] Iteration 25300, loss = 1.43037
I1022 11:35:39.651674 8536 solver.cpp:252] Train net output #0: loss = 1.43037 (* 1 = 1.43037 loss)
I1022 11:35:39.651680 8536 sgd_solver.cpp:106] Iteration 25300, lr = 0.001
I1022 11:35:50.193931 8536 solver.cpp:236] Iteration 25400, loss = 1.36059
I1022 11:35:50.193969 8536 solver.cpp:252] Train net output #0: loss = 1.36059 (* 1 = 1.36059 loss)
I1022 11:35:50.193974 8536 sgd_solver.cpp:106] Iteration 25400, lr = 0.001
I1022 11:36:00.654006 8536 solver.cpp:236] Iteration 25500, loss = 1.64038
I1022 11:36:00.654042 8536 solver.cpp:252] Train net output #0: loss = 1.64038 (* 1 = 1.64038 loss)
I1022 11:36:00.654047 8536 sgd_solver.cpp:106] Iteration 25500, lr = 0.001
I1022 11:36:11.120630 8536 solver.cpp:236] Iteration 25600, loss = 1.43291
I1022 11:36:11.120718 8536 solver.cpp:252] Train net output #0: loss = 1.43291 (* 1 = 1.43291 loss)
I1022 11:36:11.120723 8536 sgd_solver.cpp:106] Iteration 25600, lr = 0.001
I1022 11:36:21.485081 8536 solver.cpp:236] Iteration 25700, loss = 1.55469
I1022 11:36:21.485117 8536 solver.cpp:252] Train net output #0: loss = 1.55469 (* 1 = 1.55469 loss)
I1022 11:36:21.485124 8536 sgd_solver.cpp:106] Iteration 25700, lr = 0.001
I1022 11:36:31.832706 8536 solver.cpp:236] Iteration 25800, loss = 1.40826
I1022 11:36:31.832743 8536 solver.cpp:252] Train net output #0: loss = 1.40826 (* 1 = 1.40826 loss)
I1022 11:36:31.832748 8536 sgd_solver.cpp:106] Iteration 25800, lr = 0.001
I1022 11:36:42.356111 8536 solver.cpp:236] Iteration 25900, loss = 1.3593
I1022 11:36:42.356178 8536 solver.cpp:252] Train net output #0: loss = 1.3593 (* 1 = 1.3593 loss)
I1022 11:36:42.356183 8536 sgd_solver.cpp:106] Iteration 25900, lr = 0.001
I1022 11:36:52.779764 8536 solver.cpp:340] Iteration 26000, Testing net (#0)
I1022 11:36:57.839565 8536 solver.cpp:408] Test net output #0: accuracy = 0.435
I1022 11:36:57.839587 8536 solver.cpp:408] Test net output #1: loss = 1.53226 (* 1 = 1.53226 loss)
I1022 11:36:57.899875 8536 solver.cpp:236] Iteration 26000, loss = 1.61618
I1022 11:36:57.899909 8536 solver.cpp:252] Train net output #0: loss = 1.61618 (* 1 = 1.61618 loss)
I1022 11:36:57.899915 8536 sgd_solver.cpp:106] Iteration 26000, lr = 0.001
I1022 11:37:08.415971 8536 solver.cpp:236] Iteration 26100, loss = 1.41137
I1022 11:37:08.416005 8536 solver.cpp:252] Train net output #0: loss = 1.41137 (* 1 = 1.41137 loss)
I1022 11:37:08.416010 8536 sgd_solver.cpp:106] Iteration 26100, lr = 0.001
I1022 11:37:18.888247 8536 solver.cpp:236] Iteration 26200, loss = 1.5667
I1022 11:37:18.888304 8536 solver.cpp:252] Train net output #0: loss = 1.5667 (* 1 = 1.5667 loss)
I1022 11:37:18.888310 8536 sgd_solver.cpp:106] Iteration 26200, lr = 0.001
I1022 11:37:29.355510 8536 solver.cpp:236] Iteration 26300, loss = 1.42579
I1022 11:37:29.355546 8536 solver.cpp:252] Train net output #0: loss = 1.42579 (* 1 = 1.42579 loss)
I1022 11:37:29.355551 8536 sgd_solver.cpp:106] Iteration 26300, lr = 0.001
I1022 11:37:39.817487 8536 solver.cpp:236] Iteration 26400, loss = 1.36754
I1022 11:37:39.817513 8536 solver.cpp:252] Train net output #0: loss = 1.36754 (* 1 = 1.36754 loss)
I1022 11:37:39.817519 8536 sgd_solver.cpp:106] Iteration 26400, lr = 0.001
I1022 11:37:50.302403 8536 solver.cpp:236] Iteration 26500, loss = 1.60775
I1022 11:37:50.302498 8536 solver.cpp:252] Train net output #0: loss = 1.60775 (* 1 = 1.60775 loss)
I1022 11:37:50.302505 8536 sgd_solver.cpp:106] Iteration 26500, lr = 0.001
I1022 11:38:00.894369 8536 solver.cpp:236] Iteration 26600, loss = 1.40808
I1022 11:38:00.894407 8536 solver.cpp:252] Train net output #0: loss = 1.40808 (* 1 = 1.40808 loss)
I1022 11:38:00.894412 8536 sgd_solver.cpp:106] Iteration 26600, lr = 0.001
I1022 11:38:11.447166 8536 solver.cpp:236] Iteration 26700, loss = 1.54969
I1022 11:38:11.447195 8536 solver.cpp:252] Train net output #0: loss = 1.54969 (* 1 = 1.54969 loss)
I1022 11:38:11.447201 8536 sgd_solver.cpp:106] Iteration 26700, lr = 0.001
I1022 11:38:21.740226 8536 solver.cpp:236] Iteration 26800, loss = 1.41622
I1022 11:38:21.740335 8536 solver.cpp:252] Train net output #0: loss = 1.41622 (* 1 = 1.41622 loss)
I1022 11:38:21.740340 8536 sgd_solver.cpp:106] Iteration 26800, lr = 0.001
I1022 11:38:32.202797 8536 solver.cpp:236] Iteration 26900, loss = 1.35619
I1022 11:38:32.202834 8536 solver.cpp:252] Train net output #0: loss = 1.35619 (* 1 = 1.35619 loss)
I1022 11:38:32.202839 8536 sgd_solver.cpp:106] Iteration 26900, lr = 0.001
I1022 11:38:42.690114 8536 solver.cpp:340] Iteration 27000, Testing net (#0)
I1022 11:38:47.855507 8536 solver.cpp:408] Test net output #0: accuracy = 0.4396
I1022 11:38:47.855541 8536 solver.cpp:408] Test net output #1: loss = 1.52835 (* 1 = 1.52835 loss)
I1022 11:38:47.921905 8536 solver.cpp:236] Iteration 27000, loss = 1.5928
I1022 11:38:47.921938 8536 solver.cpp:252] Train net output #0: loss = 1.5928 (* 1 = 1.5928 loss)
I1022 11:38:47.921943 8536 sgd_solver.cpp:106] Iteration 27000, lr = 0.001
I1022 11:38:58.413758 8536 solver.cpp:236] Iteration 27100, loss = 1.41937
I1022 11:38:58.413866 8536 solver.cpp:252] Train net output #0: loss = 1.41937 (* 1 = 1.41937 loss)
I1022 11:38:58.413871 8536 sgd_solver.cpp:106] Iteration 27100, lr = 0.001
I1022 11:39:09.149554 8536 solver.cpp:236] Iteration 27200, loss = 1.54478
I1022 11:39:09.149590 8536 solver.cpp:252] Train net output #0: loss = 1.54478 (* 1 = 1.54478 loss)
I1022 11:39:09.149596 8536 sgd_solver.cpp:106] Iteration 27200, lr = 0.001
I1022 11:39:19.607967 8536 solver.cpp:236] Iteration 27300, loss = 1.39288
I1022 11:39:19.608003 8536 solver.cpp:252] Train net output #0: loss = 1.39288 (* 1 = 1.39288 loss)
I1022 11:39:19.608010 8536 sgd_solver.cpp:106] Iteration 27300, lr = 0.001
I1022 11:39:30.068370 8536 solver.cpp:236] Iteration 27400, loss = 1.34193
I1022 11:39:30.068480 8536 solver.cpp:252] Train net output #0: loss = 1.34193 (* 1 = 1.34193 loss)
I1022 11:39:30.068485 8536 sgd_solver.cpp:106] Iteration 27400, lr = 0.001
I1022 11:39:40.523309 8536 solver.cpp:236] Iteration 27500, loss = 1.59458
I1022 11:39:40.523345 8536 solver.cpp:252] Train net output #0: loss = 1.59458 (* 1 = 1.59458 loss)
I1022 11:39:40.523351 8536 sgd_solver.cpp:106] Iteration 27500, lr = 0.001
I1022 11:39:51.256079 8536 solver.cpp:236] Iteration 27600, loss = 1.40049
I1022 11:39:51.256115 8536 solver.cpp:252] Train net output #0: loss = 1.40049 (* 1 = 1.40049 loss)
I1022 11:39:51.256121 8536 sgd_solver.cpp:106] Iteration 27600, lr = 0.001
I1022 11:40:01.740494 8536 solver.cpp:236] Iteration 27700, loss = 1.52313
I1022 11:40:01.740583 8536 solver.cpp:252] Train net output #0: loss = 1.52313 (* 1 = 1.52313 loss)
I1022 11:40:01.740589 8536 sgd_solver.cpp:106] Iteration 27700, lr = 0.001
I1022 11:40:12.170728 8536 solver.cpp:236] Iteration 27800, loss = 1.42821
I1022 11:40:12.170765 8536 solver.cpp:252] Train net output #0: loss = 1.42821 (* 1 = 1.42821 loss)
I1022 11:40:12.170771 8536 sgd_solver.cpp:106] Iteration 27800, lr = 0.001
I1022 11:40:22.424669 8536 solver.cpp:236] Iteration 27900, loss = 1.33793
I1022 11:40:22.424705 8536 solver.cpp:252] Train net output #0: loss = 1.33793 (* 1 = 1.33793 loss)
I1022 11:40:22.424710 8536 sgd_solver.cpp:106] Iteration 27900, lr = 0.001
I1022 11:40:32.779554 8536 solver.cpp:340] Iteration 28000, Testing net (#0)
I1022 11:40:37.841409 8536 solver.cpp:408] Test net output #0: accuracy = 0.4415
I1022 11:40:37.841436 8536 solver.cpp:408] Test net output #1: loss = 1.52014 (* 1 = 1.52014 loss)
I1022 11:40:37.907819 8536 solver.cpp:236] Iteration 28000, loss = 1.60771
I1022 11:40:37.907845 8536 solver.cpp:252] Train net output #0: loss = 1.60771 (* 1 = 1.60771 loss)
I1022 11:40:37.907850 8536 sgd_solver.cpp:106] Iteration 28000, lr = 0.001
I1022 11:40:48.367655 8536 solver.cpp:236] Iteration 28100, loss = 1.40457
I1022 11:40:48.367691 8536 solver.cpp:252] Train net output #0: loss = 1.40457 (* 1 = 1.40457 loss)
I1022 11:40:48.367696 8536 sgd_solver.cpp:106] Iteration 28100, lr = 0.001
I1022 11:40:58.821410 8536 solver.cpp:236] Iteration 28200, loss = 1.5407
I1022 11:40:58.821439 8536 solver.cpp:252] Train net output #0: loss = 1.5407 (* 1 = 1.5407 loss)
I1022 11:40:58.821444 8536 sgd_solver.cpp:106] Iteration 28200, lr = 0.001
I1022 11:41:09.278826 8536 solver.cpp:236] Iteration 28300, loss = 1.42615
I1022 11:41:09.278933 8536 solver.cpp:252] Train net output #0: loss = 1.42615 (* 1 = 1.42615 loss)
I1022 11:41:09.278937 8536 sgd_solver.cpp:106] Iteration 28300, lr = 0.001
I1022 11:41:19.734524 8536 solver.cpp:236] Iteration 28400, loss = 1.34755
I1022 11:41:19.734550 8536 solver.cpp:252] Train net output #0: loss = 1.34755 (* 1 = 1.34755 loss)
I1022 11:41:19.734555 8536 sgd_solver.cpp:106] Iteration 28400, lr = 0.001
I1022 11:41:30.195405 8536 solver.cpp:236] Iteration 28500, loss = 1.61975
I1022 11:41:30.195442 8536 solver.cpp:252] Train net output #0: loss = 1.61975 (* 1 = 1.61975 loss)
I1022 11:41:30.195447 8536 sgd_solver.cpp:106] Iteration 28500, lr = 0.001
I1022 11:41:40.655810 8536 solver.cpp:236] Iteration 28600, loss = 1.39329
I1022 11:41:40.655881 8536 solver.cpp:252] Train net output #0: loss = 1.39329 (* 1 = 1.39329 loss)
I1022 11:41:40.655887 8536 sgd_solver.cpp:106] Iteration 28600, lr = 0.001
I1022 11:41:51.121827 8536 solver.cpp:236] Iteration 28700, loss = 1.535
I1022 11:41:51.121863 8536 solver.cpp:252] Train net output #0: loss = 1.535 (* 1 = 1.535 loss)
I1022 11:41:51.121868 8536 sgd_solver.cpp:106] Iteration 28700, lr = 0.001
I1022 11:42:01.590451 8536 solver.cpp:236] Iteration 28800, loss = 1.45852
I1022 11:42:01.590488 8536 solver.cpp:252] Train net output #0: loss = 1.45852 (* 1 = 1.45852 loss)
I1022 11:42:01.590493 8536 sgd_solver.cpp:106] Iteration 28800, lr = 0.001
I1022 11:42:11.847002 8536 solver.cpp:236] Iteration 28900, loss = 1.34046
I1022 11:42:11.847098 8536 solver.cpp:252] Train net output #0: loss = 1.34046 (* 1 = 1.34046 loss)
I1022 11:42:11.847113 8536 sgd_solver.cpp:106] Iteration 28900, lr = 0.001
I1022 11:42:22.172972 8536 solver.cpp:340] Iteration 29000, Testing net (#0)
I1022 11:42:27.223444 8536 solver.cpp:408] Test net output #0: accuracy = 0.4431
I1022 11:42:27.223471 8536 solver.cpp:408] Test net output #1: loss = 1.51207 (* 1 = 1.51207 loss)
I1022 11:42:27.290298 8536 solver.cpp:236] Iteration 29000, loss = 1.59525
I1022 11:42:27.290333 8536 solver.cpp:252] Train net output #0: loss = 1.59525 (* 1 = 1.59525 loss)
I1022 11:42:27.290338 8536 sgd_solver.cpp:106] Iteration 29000, lr = 0.001
I1022 11:42:37.746861 8536 solver.cpp:236] Iteration 29100, loss = 1.38651
I1022 11:42:37.746891 8536 solver.cpp:252] Train net output #0: loss = 1.38651 (* 1 = 1.38651 loss)
I1022 11:42:37.746897 8536 sgd_solver.cpp:106] Iteration 29100, lr = 0.001
I1022 11:42:48.209677 8536 solver.cpp:236] Iteration 29200, loss = 1.53256
I1022 11:42:48.209781 8536 solver.cpp:252] Train net output #0: loss = 1.53256 (* 1 = 1.53256 loss)
I1022 11:42:48.209786 8536 sgd_solver.cpp:106] Iteration 29200, lr = 0.001
I1022 11:42:58.672240 8536 solver.cpp:236] Iteration 29300, loss = 1.40044
I1022 11:42:58.672268 8536 solver.cpp:252] Train net output #0: loss = 1.40044 (* 1 = 1.40044 loss)
I1022 11:42:58.672274 8536 sgd_solver.cpp:106] Iteration 29300, lr = 0.001
I1022 11:43:09.128767 8536 solver.cpp:236] Iteration 29400, loss = 1.33163
I1022 11:43:09.128803 8536 solver.cpp:252] Train net output #0: loss = 1.33163 (* 1 = 1.33163 loss)
I1022 11:43:09.128808 8536 sgd_solver.cpp:106] Iteration 29400, lr = 0.001
I1022 11:43:19.593798 8536 solver.cpp:236] Iteration 29500, loss = 1.59855
I1022 11:43:19.593894 8536 solver.cpp:252] Train net output #0: loss = 1.59855 (* 1 = 1.59855 loss)
I1022 11:43:19.593900 8536 sgd_solver.cpp:106] Iteration 29500, lr = 0.001
I1022 11:43:30.049505 8536 solver.cpp:236] Iteration 29600, loss = 1.39279
I1022 11:43:30.049542 8536 solver.cpp:252] Train net output #0: loss = 1.39279 (* 1 = 1.39279 loss)
I1022 11:43:30.049548 8536 sgd_solver.cpp:106] Iteration 29600, lr = 0.001
I1022 11:43:40.500507 8536 solver.cpp:236] Iteration 29700, loss = 1.52737
I1022 11:43:40.500543 8536 solver.cpp:252] Train net output #0: loss = 1.52737 (* 1 = 1.52737 loss)
I1022 11:43:40.500548 8536 sgd_solver.cpp:106] Iteration 29700, lr = 0.001
I1022 11:43:50.961921 8536 solver.cpp:236] Iteration 29800, loss = 1.41553
I1022 11:43:50.961999 8536 solver.cpp:252] Train net output #0: loss = 1.41553 (* 1 = 1.41553 loss)
I1022 11:43:50.962004 8536 sgd_solver.cpp:106] Iteration 29800, lr = 0.001
I1022 11:44:01.424275 8536 solver.cpp:236] Iteration 29900, loss = 1.3335
I1022 11:44:01.424310 8536 solver.cpp:252] Train net output #0: loss = 1.3335 (* 1 = 1.3335 loss)
I1022 11:44:01.424315 8536 sgd_solver.cpp:106] Iteration 29900, lr = 0.001
I1022 11:44:11.556589 8536 solver.cpp:340] Iteration 30000, Testing net (#0)
I1022 11:44:16.622351 8536 solver.cpp:408] Test net output #0: accuracy = 0.4487
I1022 11:44:16.622377 8536 solver.cpp:408] Test net output #1: loss = 1.50874 (* 1 = 1.50874 loss)
I1022 11:44:16.689335 8536 solver.cpp:236] Iteration 30000, loss = 1.60222
I1022 11:44:16.689368 8536 solver.cpp:252] Train net output #0: loss = 1.60222 (* 1 = 1.60222 loss)
I1022 11:44:16.689373 8536 sgd_solver.cpp:46] MultiStep Status: Iteration 30000, step = 1
I1022 11:44:16.689374 8536 sgd_solver.cpp:106] Iteration 30000, lr = 0.0001
I1022 11:44:27.153178 8536 solver.cpp:236] Iteration 30100, loss = 1.35916
I1022 11:44:27.153288 8536 solver.cpp:252] Train net output #0: loss = 1.35916 (* 1 = 1.35916 loss)
I1022 11:44:27.153293 8536 sgd_solver.cpp:106] Iteration 30100, lr = 0.0001
I1022 11:44:37.619060 8536 solver.cpp:236] Iteration 30200, loss = 1.53312
I1022 11:44:37.619094 8536 solver.cpp:252] Train net output #0: loss = 1.53312 (* 1 = 1.53312 loss)
I1022 11:44:37.619101 8536 sgd_solver.cpp:106] Iteration 30200, lr = 0.0001
I1022 11:44:48.081648 8536 solver.cpp:236] Iteration 30300, loss = 1.39462
I1022 11:44:48.081684 8536 solver.cpp:252] Train net output #0: loss = 1.39462 (* 1 = 1.39462 loss)
I1022 11:44:48.081689 8536 sgd_solver.cpp:106] Iteration 30300, lr = 0.0001
I1022 11:44:58.642339 8536 solver.cpp:236] Iteration 30400, loss = 1.30419
I1022 11:44:58.642408 8536 solver.cpp:252] Train net output #0: loss = 1.30419 (* 1 = 1.30419 loss)
I1022 11:44:58.642412 8536 sgd_solver.cpp:106] Iteration 30400, lr = 0.0001
I1022 11:45:09.102916 8536 solver.cpp:236] Iteration 30500, loss = 1.56812
I1022 11:45:09.102943 8536 solver.cpp:252] Train net output #0: loss = 1.56812 (* 1 = 1.56812 loss)
I1022 11:45:09.102947 8536 sgd_solver.cpp:106] Iteration 30500, lr = 0.0001
I1022 11:45:19.749362 8536 solver.cpp:236] Iteration 30600, loss = 1.35886
I1022 11:45:19.749402 8536 solver.cpp:252] Train net output #0: loss = 1.35886 (* 1 = 1.35886 loss)
I1022 11:45:19.749407 8536 sgd_solver.cpp:106] Iteration 30600, lr = 0.0001
I1022 11:45:30.359005 8536 solver.cpp:236] Iteration 30700, loss = 1.51926
I1022 11:45:30.359102 8536 solver.cpp:252] Train net output #0: loss = 1.51926 (* 1 = 1.51926 loss)
I1022 11:45:30.359107 8536 sgd_solver.cpp:106] Iteration 30700, lr = 0.0001
I1022 11:45:40.980216 8536 solver.cpp:236] Iteration 30800, loss = 1.38614
I1022 11:45:40.980244 8536 solver.cpp:252] Train net output #0: loss = 1.38614 (* 1 = 1.38614 loss)
I1022 11:45:40.980249 8536 sgd_solver.cpp:106] Iteration 30800, lr = 0.0001
I1022 11:45:51.586606 8536 solver.cpp:236] Iteration 30900, loss = 1.30159
I1022 11:45:51.586642 8536 solver.cpp:252] Train net output #0: loss = 1.30159 (* 1 = 1.30159 loss)
I1022 11:45:51.586647 8536 sgd_solver.cpp:106] Iteration 30900, lr = 0.0001
I1022 11:46:01.836460 8536 solver.cpp:340] Iteration 31000, Testing net (#0)
I1022 11:46:06.811063 8536 solver.cpp:408] Test net output #0: accuracy = 0.4635
I1022 11:46:06.811087 8536 solver.cpp:408] Test net output #1: loss = 1.4685 (* 1 = 1.4685 loss)
I1022 11:46:06.870163 8536 solver.cpp:236] Iteration 31000, loss = 1.56621
I1022 11:46:06.870183 8536 solver.cpp:252] Train net output #0: loss = 1.56621 (* 1 = 1.56621 loss)
I1022 11:46:06.870188 8536 sgd_solver.cpp:106] Iteration 31000, lr = 0.0001
I1022 11:46:17.354161 8536 solver.cpp:236] Iteration 31100, loss = 1.35175
I1022 11:46:17.354197 8536 solver.cpp:252] Train net output #0: loss = 1.35175 (* 1 = 1.35175 loss)
I1022 11:46:17.354202 8536 sgd_solver.cpp:106] Iteration 31100, lr = 0.0001
I1022 11:46:27.861910 8536 solver.cpp:236] Iteration 31200, loss = 1.51541
I1022 11:46:27.861963 8536 solver.cpp:252] Train net output #0: loss = 1.51541 (* 1 = 1.51541 loss)
I1022 11:46:27.861973 8536 sgd_solver.cpp:106] Iteration 31200, lr = 0.0001
I1022 11:46:38.359853 8536 solver.cpp:236] Iteration 31300, loss = 1.384
I1022 11:46:38.359959 8536 solver.cpp:252] Train net output #0: loss = 1.384 (* 1 = 1.384 loss)
I1022 11:46:38.359964 8536 sgd_solver.cpp:106] Iteration 31300, lr = 0.0001
I1022 11:46:48.870753 8536 solver.cpp:236] Iteration 31400, loss = 1.30238
I1022 11:46:48.870789 8536 solver.cpp:252] Train net output #0: loss = 1.30238 (* 1 = 1.30238 loss)
I1022 11:46:48.870795 8536 sgd_solver.cpp:106] Iteration 31400, lr = 0.0001
I1022 11:46:59.352591 8536 solver.cpp:236] Iteration 31500, loss = 1.56438
I1022 11:46:59.352627 8536 solver.cpp:252] Train net output #0: loss = 1.56438 (* 1 = 1.56438 loss)
I1022 11:46:59.352632 8536 sgd_solver.cpp:106] Iteration 31500, lr = 0.0001
I1022 11:47:09.941921 8536 solver.cpp:236] Iteration 31600, loss = 1.34864
I1022 11:47:09.942008 8536 solver.cpp:252] Train net output #0: loss = 1.34864 (* 1 = 1.34864 loss)
I1022 11:47:09.942014 8536 sgd_solver.cpp:106] Iteration 31600, lr = 0.0001
I1022 11:47:20.485656 8536 solver.cpp:236] Iteration 31700, loss = 1.51057
I1022 11:47:20.485682 8536 solver.cpp:252] Train net output #0: loss = 1.51057 (* 1 = 1.51057 loss)
I1022 11:47:20.485687 8536 sgd_solver.cpp:106] Iteration 31700, lr = 0.0001
I1022 11:47:30.989984 8536 solver.cpp:236] Iteration 31800, loss = 1.38325
I1022 11:47:30.990020 8536 solver.cpp:252] Train net output #0: loss = 1.38325 (* 1 = 1.38325 loss)
I1022 11:47:30.990025 8536 sgd_solver.cpp:106] Iteration 31800, lr = 0.0001
I1022 11:47:41.506654 8536 solver.cpp:236] Iteration 31900, loss = 1.30192
I1022 11:47:41.506739 8536 solver.cpp:252] Train net output #0: loss = 1.30192 (* 1 = 1.30192 loss)
I1022 11:47:41.506744 8536 sgd_solver.cpp:106] Iteration 31900, lr = 0.0001
I1022 11:47:51.971623 8536 solver.cpp:340] Iteration 32000, Testing net (#0)
I1022 11:47:56.997818 8536 solver.cpp:408] Test net output #0: accuracy = 0.4626
I1022 11:47:56.997854 8536 solver.cpp:408] Test net output #1: loss = 1.46698 (* 1 = 1.46698 loss)
I1022 11:47:57.055127 8536 solver.cpp:236] Iteration 32000, loss = 1.56312
I1022 11:47:57.055163 8536 solver.cpp:252] Train net output #0: loss = 1.56312 (* 1 = 1.56312 loss)
I1022 11:47:57.055168 8536 sgd_solver.cpp:106] Iteration 32000, lr = 0.0001
I1022 11:48:07.438297 8536 solver.cpp:236] Iteration 32100, loss = 1.34697
I1022 11:48:07.438334 8536 solver.cpp:252] Train net output #0: loss = 1.34697 (* 1 = 1.34697 loss)
I1022 11:48:07.438340 8536 sgd_solver.cpp:106] Iteration 32100, lr = 0.0001
I1022 11:48:18.006460 8536 solver.cpp:236] Iteration 32200, loss = 1.50827
I1022 11:48:18.006554 8536 solver.cpp:252] Train net output #0: loss = 1.50827 (* 1 = 1.50827 loss)
I1022 11:48:18.006561 8536 sgd_solver.cpp:106] Iteration 32200, lr = 0.0001
I1022 11:48:28.571450 8536 solver.cpp:236] Iteration 32300, loss = 1.38206
I1022 11:48:28.571477 8536 solver.cpp:252] Train net output #0: loss = 1.38206 (* 1 = 1.38206 loss)
I1022 11:48:28.571483 8536 sgd_solver.cpp:106] Iteration 32300, lr = 0.0001
I1022 11:48:39.103261 8536 solver.cpp:236] Iteration 32400, loss = 1.30185
I1022 11:48:39.103297 8536 solver.cpp:252] Train net output #0: loss = 1.30185 (* 1 = 1.30185 loss)
I1022 11:48:39.103302 8536 sgd_solver.cpp:106] Iteration 32400, lr = 0.0001
I1022 11:48:49.596376 8536 solver.cpp:236] Iteration 32500, loss = 1.56153
I1022 11:48:49.596485 8536 solver.cpp:252] Train net output #0: loss = 1.56153 (* 1 = 1.56153 loss)
I1022 11:48:49.596492 8536 sgd_solver.cpp:106] Iteration 32500, lr = 0.0001
I1022 11:49:00.156345 8536 solver.cpp:236] Iteration 32600, loss = 1.34566
I1022 11:49:00.156373 8536 solver.cpp:252] Train net output #0: loss = 1.34566 (* 1 = 1.34566 loss)
I1022 11:49:00.156380 8536 sgd_solver.cpp:106] Iteration 32600, lr = 0.0001
I1022 11:49:10.681100 8536 solver.cpp:236] Iteration 32700, loss = 1.5073
I1022 11:49:10.681138 8536 solver.cpp:252] Train net output #0: loss = 1.5073 (* 1 = 1.5073 loss)
I1022 11:49:10.681143 8536 sgd_solver.cpp:106] Iteration 32700, lr = 0.0001
I1022 11:49:21.214411 8536 solver.cpp:236] Iteration 32800, loss = 1.38062
I1022 11:49:21.214481 8536 solver.cpp:252] Train net output #0: loss = 1.38062 (* 1 = 1.38062 loss)
I1022 11:49:21.214486 8536 sgd_solver.cpp:106] Iteration 32800, lr = 0.0001
I1022 11:49:31.773218 8536 solver.cpp:236] Iteration 32900, loss = 1.30129
I1022 11:49:31.773253 8536 solver.cpp:252] Train net output #0: loss = 1.30129 (* 1 = 1.30129 loss)
I1022 11:49:31.773258 8536 sgd_solver.cpp:106] Iteration 32900, lr = 0.0001
I1022 11:49:42.182524 8536 solver.cpp:340] Iteration 33000, Testing net (#0)
I1022 11:49:47.386404 8536 solver.cpp:408] Test net output #0: accuracy = 0.4632
I1022 11:49:47.386440 8536 solver.cpp:408] Test net output #1: loss = 1.46573 (* 1 = 1.46573 loss)
I1022 11:49:47.449969 8536 solver.cpp:236] Iteration 33000, loss = 1.55962
I1022 11:49:47.450004 8536 solver.cpp:252] Train net output #0: loss = 1.55962 (* 1 = 1.55962 loss)
I1022 11:49:47.450009 8536 sgd_solver.cpp:106] Iteration 33000, lr = 0.0001
I1022 11:49:57.774360 8536 solver.cpp:236] Iteration 33100, loss = 1.34456
I1022 11:49:57.774466 8536 solver.cpp:252] Train net output #0: loss = 1.34456 (* 1 = 1.34456 loss)
I1022 11:49:57.774471 8536 sgd_solver.cpp:106] Iteration 33100, lr = 0.0001
I1022 11:50:08.294697 8536 solver.cpp:236] Iteration 33200, loss = 1.50617
I1022 11:50:08.294734 8536 solver.cpp:252] Train net output #0: loss = 1.50617 (* 1 = 1.50617 loss)
I1022 11:50:08.294739 8536 sgd_solver.cpp:106] Iteration 33200, lr = 0.0001
I1022 11:50:18.818411 8536 solver.cpp:236] Iteration 33300, loss = 1.3794
I1022 11:50:18.818452 8536 solver.cpp:252] Train net output #0: loss = 1.3794 (* 1 = 1.3794 loss)
I1022 11:50:18.818459 8536 sgd_solver.cpp:106] Iteration 33300, lr = 0.0001
I1022 11:50:29.380736 8536 solver.cpp:236] Iteration 33400, loss = 1.30068
I1022 11:50:29.380823 8536 solver.cpp:252] Train net output #0: loss = 1.30068 (* 1 = 1.30068 loss)
I1022 11:50:29.380828 8536 sgd_solver.cpp:106] Iteration 33400, lr = 0.0001
I1022 11:50:39.882395 8536 solver.cpp:236] Iteration 33500, loss = 1.55791
I1022 11:50:39.882432 8536 solver.cpp:252] Train net output #0: loss = 1.55791 (* 1 = 1.55791 loss)
I1022 11:50:39.882437 8536 sgd_solver.cpp:106] Iteration 33500, lr = 0.0001
I1022 11:50:50.369966 8536 solver.cpp:236] Iteration 33600, loss = 1.34354
I1022 11:50:50.370002 8536 solver.cpp:252] Train net output #0: loss = 1.34354 (* 1 = 1.34354 loss)
I1022 11:50:50.370007 8536 sgd_solver.cpp:106] Iteration 33600, lr = 0.0001
I1022 11:51:00.860945 8536 solver.cpp:236] Iteration 33700, loss = 1.50495
I1022 11:51:00.861033 8536 solver.cpp:252] Train net output #0: loss = 1.50495 (* 1 = 1.50495 loss)
I1022 11:51:00.861039 8536 sgd_solver.cpp:106] Iteration 33700, lr = 0.0001
I1022 11:51:11.364581 8536 solver.cpp:236] Iteration 33800, loss = 1.37846
I1022 11:51:11.364619 8536 solver.cpp:252] Train net output #0: loss = 1.37846 (* 1 = 1.37846 loss)
I1022 11:51:11.364624 8536 sgd_solver.cpp:106] Iteration 33800, lr = 0.0001
I1022 11:51:21.898196 8536 solver.cpp:236] Iteration 33900, loss = 1.30004
I1022 11:51:21.898232 8536 solver.cpp:252] Train net output #0: loss = 1.30004 (* 1 = 1.30004 loss)
I1022 11:51:21.898237 8536 sgd_solver.cpp:106] Iteration 33900, lr = 0.0001
I1022 11:51:32.306247 8536 solver.cpp:340] Iteration 34000, Testing net (#0)
I1022 11:51:37.394461 8536 solver.cpp:408] Test net output #0: accuracy = 0.463
I1022 11:51:37.394496 8536 solver.cpp:408] Test net output #1: loss = 1.46451 (* 1 = 1.46451 loss)
I1022 11:51:37.462360 8536 solver.cpp:236] Iteration 34000, loss = 1.55647
I1022 11:51:37.462395 8536 solver.cpp:252] Train net output #0: loss = 1.55647 (* 1 = 1.55647 loss)
I1022 11:51:37.462400 8536 sgd_solver.cpp:106] Iteration 34000, lr = 0.0001
I1022 11:51:47.964891 8536 solver.cpp:236] Iteration 34100, loss = 1.34263
I1022 11:51:47.964916 8536 solver.cpp:252] Train net output #0: loss = 1.34263 (* 1 = 1.34263 loss)
I1022 11:51:47.964922 8536 sgd_solver.cpp:106] Iteration 34100, lr = 0.0001
I1022 11:51:58.268203 8536 solver.cpp:236] Iteration 34200, loss = 1.50366
I1022 11:51:58.268229 8536 solver.cpp:252] Train net output #0: loss = 1.50366 (* 1 = 1.50366 loss)
I1022 11:51:58.268234 8536 sgd_solver.cpp:106] Iteration 34200, lr = 0.0001
I1022 11:52:08.780068 8536 solver.cpp:236] Iteration 34300, loss = 1.3776
I1022 11:52:08.780158 8536 solver.cpp:252] Train net output #0: loss = 1.3776 (* 1 = 1.3776 loss)
I1022 11:52:08.780164 8536 sgd_solver.cpp:106] Iteration 34300, lr = 0.0001
I1022 11:52:19.272028 8536 solver.cpp:236] Iteration 34400, loss = 1.29945
I1022 11:52:19.272063 8536 solver.cpp:252] Train net output #0: loss = 1.29945 (* 1 = 1.29945 loss)
I1022 11:52:19.272069 8536 sgd_solver.cpp:106] Iteration 34400, lr = 0.0001
I1022 11:52:29.783839 8536 solver.cpp:236] Iteration 34500, loss = 1.55522
I1022 11:52:29.783877 8536 solver.cpp:252] Train net output #0: loss = 1.55522 (* 1 = 1.55522 loss)
I1022 11:52:29.783882 8536 sgd_solver.cpp:106] Iteration 34500, lr = 0.0001
I1022 11:52:40.297365 8536 solver.cpp:236] Iteration 34600, loss = 1.34177
I1022 11:52:40.297511 8536 solver.cpp:252] Train net output #0: loss = 1.34177 (* 1 = 1.34177 loss)
I1022 11:52:40.297518 8536 sgd_solver.cpp:106] Iteration 34600, lr = 0.0001
I1022 11:52:50.825037 8536 solver.cpp:236] Iteration 34700, loss = 1.50237
I1022 11:52:50.825076 8536 solver.cpp:252] Train net output #0: loss = 1.50237 (* 1 = 1.50237 loss)
I1022 11:52:50.825083 8536 sgd_solver.cpp:106] Iteration 34700, lr = 0.0001
I1022 11:53:01.321588 8536 solver.cpp:236] Iteration 34800, loss = 1.37683
I1022 11:53:01.321624 8536 solver.cpp:252] Train net output #0: loss = 1.37683 (* 1 = 1.37683 loss)
I1022 11:53:01.321629 8536 sgd_solver.cpp:106] Iteration 34800, lr = 0.0001
I1022 11:53:11.829751 8536 solver.cpp:236] Iteration 34900, loss = 1.29893
I1022 11:53:11.829828 8536 solver.cpp:252] Train net output #0: loss = 1.29893 (* 1 = 1.29893 loss)
I1022 11:53:11.829834 8536 sgd_solver.cpp:106] Iteration 34900, lr = 0.0001
I1022 11:53:22.262332 8536 solver.cpp:340] Iteration 35000, Testing net (#0)
I1022 11:53:27.346029 8536 solver.cpp:408] Test net output #0: accuracy = 0.4635
I1022 11:53:27.346063 8536 solver.cpp:408] Test net output #1: loss = 1.46337 (* 1 = 1.46337 loss)
I1022 11:53:27.414964 8536 solver.cpp:236] Iteration 35000, loss = 1.55403
I1022 11:53:27.415000 8536 solver.cpp:252] Train net output #0: loss = 1.55403 (* 1 = 1.55403 loss)
I1022 11:53:27.415006 8536 sgd_solver.cpp:46] MultiStep Status: Iteration 35000, step = 2
I1022 11:53:27.415009 8536 sgd_solver.cpp:106] Iteration 35000, lr = 1e-05
I1022 11:53:37.923060 8536 solver.cpp:236] Iteration 35100, loss = 1.34269
I1022 11:53:37.923089 8536 solver.cpp:252] Train net output #0: loss = 1.34269 (* 1 = 1.34269 loss)
I1022 11:53:37.923095 8536 sgd_solver.cpp:106] Iteration 35100, lr = 1e-05
I1022 11:53:48.417335 8536 solver.cpp:236] Iteration 35200, loss = 1.50586
I1022 11:53:48.417438 8536 solver.cpp:252] Train net output #0: loss = 1.50586 (* 1 = 1.50586 loss)
I1022 11:53:48.417443 8536 sgd_solver.cpp:106] Iteration 35200, lr = 1e-05
I1022 11:53:58.963354 8536 solver.cpp:236] Iteration 35300, loss = 1.38995
I1022 11:53:58.963392 8536 solver.cpp:252] Train net output #0: loss = 1.38995 (* 1 = 1.38995 loss)
I1022 11:53:58.963397 8536 sgd_solver.cpp:106] Iteration 35300, lr = 1e-05
I1022 11:54:09.442404 8536 solver.cpp:236] Iteration 35400, loss = 1.29111
I1022 11:54:09.442440 8536 solver.cpp:252] Train net output #0: loss = 1.29111 (* 1 = 1.29111 loss)
I1022 11:54:09.442445 8536 sgd_solver.cpp:106] Iteration 35400, lr = 1e-05
I1022 11:54:19.973907 8536 solver.cpp:236] Iteration 35500, loss = 1.54932
I1022 11:54:19.973978 8536 solver.cpp:252] Train net output #0: loss = 1.54932 (* 1 = 1.54932 loss)
I1022 11:54:19.973984 8536 sgd_solver.cpp:106] Iteration 35500, lr = 1e-05
I1022 11:54:30.503561 8536 solver.cpp:236] Iteration 35600, loss = 1.34406
I1022 11:54:30.503600 8536 solver.cpp:252] Train net output #0: loss = 1.34406 (* 1 = 1.34406 loss)
I1022 11:54:30.503605 8536 sgd_solver.cpp:106] Iteration 35600, lr = 1e-05
I1022 11:54:41.000613 8536 solver.cpp:236] Iteration 35700, loss = 1.50176
I1022 11:54:41.000650 8536 solver.cpp:252] Train net output #0: loss = 1.50176 (* 1 = 1.50176 loss)
I1022 11:54:41.000654 8536 sgd_solver.cpp:106] Iteration 35700, lr = 1e-05
I1022 11:54:51.552323 8536 solver.cpp:236] Iteration 35800, loss = 1.38695
I1022 11:54:51.552412 8536 solver.cpp:252] Train net output #0: loss = 1.38695 (* 1 = 1.38695 loss)
I1022 11:54:51.552417 8536 sgd_solver.cpp:106] Iteration 35800, lr = 1e-05
I1022 11:55:02.025522 8536 solver.cpp:236] Iteration 35900, loss = 1.28913
I1022 11:55:02.025559 8536 solver.cpp:252] Train net output #0: loss = 1.28913 (* 1 = 1.28913 loss)
I1022 11:55:02.025564 8536 sgd_solver.cpp:106] Iteration 35900, lr = 1e-05
I1022 11:55:12.383492 8536 solver.cpp:340] Iteration 36000, Testing net (#0)
I1022 11:55:17.454381 8536 solver.cpp:408] Test net output #0: accuracy = 0.4624
I1022 11:55:17.454416 8536 solver.cpp:408] Test net output #1: loss = 1.46146 (* 1 = 1.46146 loss)
I1022 11:55:17.519373 8536 solver.cpp:236] Iteration 36000, loss = 1.54857
I1022 11:55:17.519407 8536 solver.cpp:252] Train net output #0: loss = 1.54857 (* 1 = 1.54857 loss)
I1022 11:55:17.519412 8536 sgd_solver.cpp:106] Iteration 36000, lr = 1e-05
I1022 11:55:28.058266 8536 solver.cpp:236] Iteration 36100, loss = 1.34408
I1022 11:55:28.058336 8536 solver.cpp:252] Train net output #0: loss = 1.34408 (* 1 = 1.34408 loss)
I1022 11:55:28.058342 8536 sgd_solver.cpp:106] Iteration 36100, lr = 1e-05
I1022 11:55:38.576894 8536 solver.cpp:236] Iteration 36200, loss = 1.50114
I1022 11:55:38.576936 8536 solver.cpp:252] Train net output #0: loss = 1.50114 (* 1 = 1.50114 loss)
I1022 11:55:38.576942 8536 sgd_solver.cpp:106] Iteration 36200, lr = 1e-05
I1022 11:55:48.832025 8536 solver.cpp:236] Iteration 36300, loss = 1.38623
I1022 11:55:48.832062 8536 solver.cpp:252] Train net output #0: loss = 1.38623 (* 1 = 1.38623 loss)
I1022 11:55:48.832067 8536 sgd_solver.cpp:106] Iteration 36300, lr = 1e-05
I1022 11:55:59.301328 8536 solver.cpp:236] Iteration 36400, loss = 1.28841
I1022 11:55:59.301399 8536 solver.cpp:252] Train net output #0: loss = 1.28841 (* 1 = 1.28841 loss)
I1022 11:55:59.301405 8536 sgd_solver.cpp:106] Iteration 36400, lr = 1e-05
I1022 11:56:09.872306 8536 solver.cpp:236] Iteration 36500, loss = 1.54851
I1022 11:56:09.872337 8536 solver.cpp:252] Train net output #0: loss = 1.54851 (* 1 = 1.54851 loss)
I1022 11:56:09.872342 8536 sgd_solver.cpp:106] Iteration 36500, lr = 1e-05
I1022 11:56:20.589830 8536 solver.cpp:236] Iteration 36600, loss = 1.34401
I1022 11:56:20.589867 8536 solver.cpp:252] Train net output #0: loss = 1.34401 (* 1 = 1.34401 loss)
I1022 11:56:20.589872 8536 sgd_solver.cpp:106] Iteration 36600, lr = 1e-05
I1022 11:56:32.078441 8536 solver.cpp:236] Iteration 36700, loss = 1.50103
I1022 11:56:32.078553 8536 solver.cpp:252] Train net output #0: loss = 1.50103 (* 1 = 1.50103 loss)
I1022 11:56:32.078568 8536 sgd_solver.cpp:106] Iteration 36700, lr = 1e-05
I1022 11:56:42.593332 8536 solver.cpp:236] Iteration 36800, loss = 1.38598
I1022 11:56:42.593369 8536 solver.cpp:252] Train net output #0: loss = 1.38598 (* 1 = 1.38598 loss)
I1022 11:56:42.593372 8536 sgd_solver.cpp:106] Iteration 36800, lr = 1e-05
I1022 11:56:53.084534 8536 solver.cpp:236] Iteration 36900, loss = 1.28808
I1022 11:56:53.084561 8536 solver.cpp:252] Train net output #0: loss = 1.28808 (* 1 = 1.28808 loss)
I1022 11:56:53.084565 8536 sgd_solver.cpp:106] Iteration 36900, lr = 1e-05
I1022 11:57:03.611953 8536 solver.cpp:340] Iteration 37000, Testing net (#0)
I1022 11:57:09.567453 8536 solver.cpp:408] Test net output #0: accuracy = 0.4627
I1022 11:57:09.567499 8536 solver.cpp:408] Test net output #1: loss = 1.46136 (* 1 = 1.46136 loss)
I1022 11:57:09.636031 8536 solver.cpp:236] Iteration 37000, loss = 1.54852
I1022 11:57:09.636056 8536 solver.cpp:252] Train net output #0: loss = 1.54852 (* 1 = 1.54852 loss)
I1022 11:57:09.636061 8536 sgd_solver.cpp:106] Iteration 37000, lr = 1e-05
I1022 11:57:20.137596 8536 solver.cpp:236] Iteration 37100, loss = 1.3439
I1022 11:57:20.137624 8536 solver.cpp:252] Train net output #0: loss = 1.3439 (* 1 = 1.3439 loss)
I1022 11:57:20.137629 8536 sgd_solver.cpp:106] Iteration 37100, lr = 1e-05
I1022 11:57:30.688383 8536 solver.cpp:236] Iteration 37200, loss = 1.50103
I1022 11:57:30.688410 8536 solver.cpp:252] Train net output #0: loss = 1.50103 (* 1 = 1.50103 loss)
I1022 11:57:30.688416 8536 sgd_solver.cpp:106] Iteration 37200, lr = 1e-05
I1022 11:57:44.473042 8536 solver.cpp:236] Iteration 37300, loss = 1.38583
I1022 11:57:44.473134 8536 solver.cpp:252] Train net output #0: loss = 1.38583 (* 1 = 1.38583 loss)
I1022 11:57:44.473140 8536 sgd_solver.cpp:106] Iteration 37300, lr = 1e-05
I1022 11:57:59.218261 8536 solver.cpp:236] Iteration 37400, loss = 1.28786
I1022 11:57:59.218289 8536 solver.cpp:252] Train net output #0: loss = 1.28786 (* 1 = 1.28786 loss)
I1022 11:57:59.218294 8536 sgd_solver.cpp:106] Iteration 37400, lr = 1e-05
I1022 11:58:09.733634 8536 solver.cpp:236] Iteration 37500, loss = 1.54853
I1022 11:58:09.733669 8536 solver.cpp:252] Train net output #0: loss = 1.54853 (* 1 = 1.54853 loss)
I1022 11:58:09.733675 8536 sgd_solver.cpp:106] Iteration 37500, lr = 1e-05
I1022 11:58:21.074187 8536 solver.cpp:236] Iteration 37600, loss = 1.34378
I1022 11:58:21.074277 8536 solver.cpp:252] Train net output #0: loss = 1.34378 (* 1 = 1.34378 loss)
I1022 11:58:21.074283 8536 sgd_solver.cpp:106] Iteration 37600, lr = 1e-05
I1022 11:58:36.137024 8536 solver.cpp:236] Iteration 37700, loss = 1.50102
I1022 11:58:36.137074 8536 solver.cpp:252] Train net output #0: loss = 1.50102 (* 1 = 1.50102 loss)
I1022 11:58:36.137084 8536 sgd_solver.cpp:106] Iteration 37700, lr = 1e-05
I1022 11:58:51.222218 8536 solver.cpp:236] Iteration 37800, loss = 1.38572
I1022 11:58:51.222304 8536 solver.cpp:252] Train net output #0: loss = 1.38572 (* 1 = 1.38572 loss)
I1022 11:58:51.222309 8536 sgd_solver.cpp:106] Iteration 37800, lr = 1e-05
I1022 11:59:06.287335 8536 solver.cpp:236] Iteration 37900, loss = 1.2877
I1022 11:59:06.287374 8536 solver.cpp:252] Train net output #0: loss = 1.2877 (* 1 = 1.2877 loss)
I1022 11:59:06.287379 8536 sgd_solver.cpp:106] Iteration 37900, lr = 1e-05
I1022 11:59:17.783056 8536 solver.cpp:340] Iteration 38000, Testing net (#0)
I1022 11:59:22.865303 8536 solver.cpp:408] Test net output #0: accuracy = 0.4628
I1022 11:59:22.865396 8536 solver.cpp:408] Test net output #1: loss = 1.46126 (* 1 = 1.46126 loss)
I1022 11:59:22.928535 8536 solver.cpp:236] Iteration 38000, loss = 1.54853
I1022 11:59:22.928562 8536 solver.cpp:252] Train net output #0: loss = 1.54853 (* 1 = 1.54853 loss)
I1022 11:59:22.928566 8536 sgd_solver.cpp:106] Iteration 38000, lr = 1e-05
I1022 11:59:33.442332 8536 solver.cpp:236] Iteration 38100, loss = 1.34365
I1022 11:59:33.442369 8536 solver.cpp:252] Train net output #0: loss = 1.34365 (* 1 = 1.34365 loss)
I1022 11:59:33.442374 8536 sgd_solver.cpp:106] Iteration 38100, lr = 1e-05
I1022 11:59:43.953290 8536 solver.cpp:236] Iteration 38200, loss = 1.50101
I1022 11:59:43.953317 8536 solver.cpp:252] Train net output #0: loss = 1.50101 (* 1 = 1.50101 loss)
I1022 11:59:43.953321 8536 sgd_solver.cpp:106] Iteration 38200, lr = 1e-05
I1022 11:59:57.230319 8536 solver.cpp:236] Iteration 38300, loss = 1.38562
I1022 11:59:57.230406 8536 solver.cpp:252] Train net output #0: loss = 1.38562 (* 1 = 1.38562 loss)
I1022 11:59:57.230412 8536 sgd_solver.cpp:106] Iteration 38300, lr = 1e-05
I1022 12:00:12.137794 8536 solver.cpp:236] Iteration 38400, loss = 1.28757
I1022 12:00:12.137830 8536 solver.cpp:252] Train net output #0: loss = 1.28757 (* 1 = 1.28757 loss)
I1022 12:00:12.137836 8536 sgd_solver.cpp:106] Iteration 38400, lr = 1e-05
I1022 12:00:27.207715 8536 solver.cpp:236] Iteration 38500, loss = 1.54851
I1022 12:00:27.207752 8536 solver.cpp:252] Train net output #0: loss = 1.54851 (* 1 = 1.54851 loss)
I1022 12:00:27.207758 8536 sgd_solver.cpp:106] Iteration 38500, lr = 1e-05
I1022 12:00:38.647505 8536 solver.cpp:236] Iteration 38600, loss = 1.34354
I1022 12:00:38.647543 8536 solver.cpp:252] Train net output #0: loss = 1.34354 (* 1 = 1.34354 loss)
I1022 12:00:38.647558 8536 sgd_solver.cpp:106] Iteration 38600, lr = 1e-05
I1022 12:00:49.226148 8536 solver.cpp:236] Iteration 38700, loss = 1.50098
I1022 12:00:49.226184 8536 solver.cpp:252] Train net output #0: loss = 1.50098 (* 1 = 1.50098 loss)
I1022 12:00:49.226189 8536 sgd_solver.cpp:106] Iteration 38700, lr = 1e-05
I1022 12:00:59.797153 8536 solver.cpp:236] Iteration 38800, loss = 1.38552
I1022 12:00:59.797193 8536 solver.cpp:252] Train net output #0: loss = 1.38552 (* 1 = 1.38552 loss)
I1022 12:00:59.797199 8536 sgd_solver.cpp:106] Iteration 38800, lr = 1e-05
I1022 12:01:12.047413 8536 solver.cpp:236] Iteration 38900, loss = 1.28745
I1022 12:01:12.047502 8536 solver.cpp:252] Train net output #0: loss = 1.28745 (* 1 = 1.28745 loss)
I1022 12:01:12.047508 8536 sgd_solver.cpp:106] Iteration 38900, lr = 1e-05
I1022 12:01:22.470515 8536 solver.cpp:340] Iteration 39000, Testing net (#0)
I1022 12:01:27.582578 8536 solver.cpp:408] Test net output #0: accuracy = 0.4627
I1022 12:01:27.582612 8536 solver.cpp:408] Test net output #1: loss = 1.46116 (* 1 = 1.46116 loss)
I1022 12:01:27.647591 8536 solver.cpp:236] Iteration 39000, loss = 1.54848
I1022 12:01:27.647629 8536 solver.cpp:252] Train net output #0: loss = 1.54848 (* 1 = 1.54848 loss)
I1022 12:01:27.647634 8536 sgd_solver.cpp:106] Iteration 39000, lr = 1e-05
I1022 12:01:38.190942 8536 solver.cpp:236] Iteration 39100, loss = 1.34343
I1022 12:01:38.190976 8536 solver.cpp:252] Train net output #0: loss = 1.34343 (* 1 = 1.34343 loss)
I1022 12:01:38.190982 8536 sgd_solver.cpp:106] Iteration 39100, lr = 1e-05
I1022 12:01:48.655378 8536 solver.cpp:236] Iteration 39200, loss = 1.50094
I1022 12:01:48.655448 8536 solver.cpp:252] Train net output #0: loss = 1.50094 (* 1 = 1.50094 loss)
I1022 12:01:48.655454 8536 sgd_solver.cpp:106] Iteration 39200, lr = 1e-05
I1022 12:01:59.202416 8536 solver.cpp:236] Iteration 39300, loss = 1.38543
I1022 12:01:59.202452 8536 solver.cpp:252] Train net output #0: loss = 1.38543 (* 1 = 1.38543 loss)
I1022 12:01:59.202458 8536 sgd_solver.cpp:106] Iteration 39300, lr = 1e-05
I1022 12:02:09.744837 8536 solver.cpp:236] Iteration 39400, loss = 1.28733
I1022 12:02:09.744874 8536 solver.cpp:252] Train net output #0: loss = 1.28733 (* 1 = 1.28733 loss)
I1022 12:02:09.744879 8536 sgd_solver.cpp:106] Iteration 39400, lr = 1e-05
I1022 12:02:20.084470 8536 solver.cpp:236] Iteration 39500, loss = 1.54845
I1022 12:02:20.084569 8536 solver.cpp:252] Train net output #0: loss = 1.54845 (* 1 = 1.54845 loss)
I1022 12:02:20.084575 8536 sgd_solver.cpp:106] Iteration 39500, lr = 1e-05
I1022 12:02:30.590031 8536 solver.cpp:236] Iteration 39600, loss = 1.34332
I1022 12:02:30.590059 8536 solver.cpp:252] Train net output #0: loss = 1.34332 (* 1 = 1.34332 loss)
I1022 12:02:30.590064 8536 sgd_solver.cpp:106] Iteration 39600, lr = 1e-05
I1022 12:02:41.090946 8536 solver.cpp:236] Iteration 39700, loss = 1.50089
I1022 12:02:41.090981 8536 solver.cpp:252] Train net output #0: loss = 1.50089 (* 1 = 1.50089 loss)
I1022 12:02:41.090986 8536 sgd_solver.cpp:106] Iteration 39700, lr = 1e-05
I1022 12:02:51.588835 8536 solver.cpp:236] Iteration 39800, loss = 1.38534
I1022 12:02:51.588923 8536 solver.cpp:252] Train net output #0: loss = 1.38534 (* 1 = 1.38534 loss)
I1022 12:02:51.588930 8536 sgd_solver.cpp:106] Iteration 39800, lr = 1e-05
I1022 12:03:02.116503 8536 solver.cpp:236] Iteration 39900, loss = 1.28723
I1022 12:03:02.116530 8536 solver.cpp:252] Train net output #0: loss = 1.28723 (* 1 = 1.28723 loss)
I1022 12:03:02.116535 8536 sgd_solver.cpp:106] Iteration 39900, lr = 1e-05
I1022 12:03:12.569393 8536 solver.cpp:461] Snapshotting to binary proto file examples/cifar10_full_sigmoid_iter_40000.caffemodel
I1022 12:03:12.607450 8536 sgd_solver.cpp:269] Snapshotting solver state to binary proto file examples/cifar10_full_sigmoid_iter_40000.solverstate
I1022 12:03:12.655246 8536 solver.cpp:320] Iteration 40000, loss = 1.54841
I1022 12:03:12.655266 8536 solver.cpp:340] Iteration 40000, Testing net (#0)
I1022 12:03:17.699828 8536 solver.cpp:408] Test net output #0: accuracy = 0.463
I1022 12:03:17.699864 8536 solver.cpp:408] Test net output #1: loss = 1.46104 (* 1 = 1.46104 loss)
I1022 12:03:17.699867 8536 solver.cpp:325] Optimization Done.
I1022 12:03:17.699869 8536 caffe.cpp:215] Optimization Done.
I1022 10:47:12.151319 8564 caffe.cpp:184] Using GPUs 0
I1022 10:47:12.435086 8564 solver.cpp:47] Initializing solver from parameters:
test_iter: 100
test_interval: 1000
base_lr: 0.001
display: 100
max_iter: 40000
lr_policy: "multistep"
gamma: 0.1
momentum: 0.9
weight_decay: 0.004
snapshot_prefix: "examples/cifar10_full_sigmoid_bn"
solver_mode: GPU
device_id: 0
net: "examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt"
test_initialization: false
stepvalue: 30000
stepvalue: 35000
I1022 10:47:12.435207 8564 solver.cpp:90] Creating training net from net file: examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt
I1022 10:47:12.435619 8564 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer cifar
I1022 10:47:12.435631 8564 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer bn1
I1022 10:47:12.435644 8564 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer bn2
I1022 10:47:12.435648 8564 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer bn3
I1022 10:47:12.435652 8564 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I1022 10:47:12.435748 8564 net.cpp:49] Initializing net from parameters:
name: "CIFAR10_full"
state {
phase: TRAIN
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "bn1"
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
}
}
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "bn1"
top: "bn1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "bn1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
}
}
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TRAIN
}
batch_norm_param {
use_global_stats: false
}
}
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
I1022 10:47:12.436312 8564 layer_factory.hpp:76] Creating layer cifar
I1022 10:47:12.436770 8564 net.cpp:106] Creating Layer cifar
I1022 10:47:12.436794 8564 net.cpp:411] cifar -> data
I1022 10:47:12.436823 8564 net.cpp:411] cifar -> label
I1022 10:47:12.436842 8564 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto
I1022 10:47:12.437497 8566 db_lmdb.cpp:38] Opened lmdb examples/cifar10/cifar10_train_lmdb
I1022 10:47:12.475785 8564 data_layer.cpp:45] output data size: 100,3,32,32
I1022 10:47:12.481014 8564 net.cpp:150] Setting up cifar
I1022 10:47:12.481081 8564 net.cpp:157] Top shape: 100 3 32 32 (307200)
I1022 10:47:12.481094 8564 net.cpp:157] Top shape: 100 (100)
I1022 10:47:12.481103 8564 net.cpp:165] Memory required for data: 1229200
I1022 10:47:12.481117 8564 layer_factory.hpp:76] Creating layer conv1
I1022 10:47:12.481139 8564 net.cpp:106] Creating Layer conv1
I1022 10:47:12.481151 8564 net.cpp:454] conv1 <- data
I1022 10:47:12.481169 8564 net.cpp:411] conv1 -> conv1
I1022 10:47:12.481775 8564 net.cpp:150] Setting up conv1
I1022 10:47:12.481782 8564 net.cpp:157] Top shape: 100 32 32 32 (3276800)
I1022 10:47:12.481796 8564 net.cpp:165] Memory required for data: 14336400
I1022 10:47:12.481807 8564 layer_factory.hpp:76] Creating layer pool1
I1022 10:47:12.481813 8564 net.cpp:106] Creating Layer pool1
I1022 10:47:12.481830 8564 net.cpp:454] pool1 <- conv1
I1022 10:47:12.481843 8564 net.cpp:411] pool1 -> pool1
I1022 10:47:12.482364 8564 net.cpp:150] Setting up pool1
I1022 10:47:12.482389 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.482399 8564 net.cpp:165] Memory required for data: 17613200
I1022 10:47:12.482409 8564 layer_factory.hpp:76] Creating layer bn1
I1022 10:47:12.482426 8564 net.cpp:106] Creating Layer bn1
I1022 10:47:12.482437 8564 net.cpp:454] bn1 <- pool1
I1022 10:47:12.482450 8564 net.cpp:411] bn1 -> bn1
I1022 10:47:12.482601 8564 net.cpp:150] Setting up bn1
I1022 10:47:12.482616 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.482626 8564 net.cpp:165] Memory required for data: 20890000
I1022 10:47:12.482642 8564 layer_factory.hpp:76] Creating layer Sigmoid1
I1022 10:47:12.482655 8564 net.cpp:106] Creating Layer Sigmoid1
I1022 10:47:12.482664 8564 net.cpp:454] Sigmoid1 <- bn1
I1022 10:47:12.482674 8564 net.cpp:397] Sigmoid1 -> bn1 (in-place)
I1022 10:47:12.482686 8564 net.cpp:150] Setting up Sigmoid1
I1022 10:47:12.482695 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.482703 8564 net.cpp:165] Memory required for data: 24166800
I1022 10:47:12.482712 8564 layer_factory.hpp:76] Creating layer conv2
I1022 10:47:12.482728 8564 net.cpp:106] Creating Layer conv2
I1022 10:47:12.482738 8564 net.cpp:454] conv2 <- bn1
I1022 10:47:12.482749 8564 net.cpp:411] conv2 -> conv2
I1022 10:47:12.484033 8564 net.cpp:150] Setting up conv2
I1022 10:47:12.484043 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.484045 8564 net.cpp:165] Memory required for data: 27443600
I1022 10:47:12.484050 8564 layer_factory.hpp:76] Creating layer bn2
I1022 10:47:12.484055 8564 net.cpp:106] Creating Layer bn2
I1022 10:47:12.484071 8564 net.cpp:454] bn2 <- conv2
I1022 10:47:12.484086 8564 net.cpp:397] bn2 -> conv2 (in-place)
I1022 10:47:12.484208 8564 net.cpp:150] Setting up bn2
I1022 10:47:12.484215 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.484216 8564 net.cpp:165] Memory required for data: 30720400
I1022 10:47:12.484223 8564 layer_factory.hpp:76] Creating layer Sigmoid2
I1022 10:47:12.484241 8564 net.cpp:106] Creating Layer Sigmoid2
I1022 10:47:12.484263 8564 net.cpp:454] Sigmoid2 <- conv2
I1022 10:47:12.484274 8564 net.cpp:397] Sigmoid2 -> conv2 (in-place)
I1022 10:47:12.484287 8564 net.cpp:150] Setting up Sigmoid2
I1022 10:47:12.484297 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.484305 8564 net.cpp:165] Memory required for data: 33997200
I1022 10:47:12.484314 8564 layer_factory.hpp:76] Creating layer pool2
I1022 10:47:12.484328 8564 net.cpp:106] Creating Layer pool2
I1022 10:47:12.484336 8564 net.cpp:454] pool2 <- conv2
I1022 10:47:12.484350 8564 net.cpp:411] pool2 -> pool2
I1022 10:47:12.484395 8564 net.cpp:150] Setting up pool2
I1022 10:47:12.484411 8564 net.cpp:157] Top shape: 100 32 8 8 (204800)
I1022 10:47:12.484419 8564 net.cpp:165] Memory required for data: 34816400
I1022 10:47:12.484428 8564 layer_factory.hpp:76] Creating layer conv3
I1022 10:47:12.484442 8564 net.cpp:106] Creating Layer conv3
I1022 10:47:12.484452 8564 net.cpp:454] conv3 <- pool2
I1022 10:47:12.484463 8564 net.cpp:411] conv3 -> conv3
I1022 10:47:12.485782 8564 net.cpp:150] Setting up conv3
I1022 10:47:12.485788 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.485791 8564 net.cpp:165] Memory required for data: 36454800
I1022 10:47:12.485795 8564 layer_factory.hpp:76] Creating layer bn3
I1022 10:47:12.485800 8564 net.cpp:106] Creating Layer bn3
I1022 10:47:12.485802 8564 net.cpp:454] bn3 <- conv3
I1022 10:47:12.485805 8564 net.cpp:397] bn3 -> conv3 (in-place)
I1022 10:47:12.485916 8564 net.cpp:150] Setting up bn3
I1022 10:47:12.485932 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.485941 8564 net.cpp:165] Memory required for data: 38093200
I1022 10:47:12.485954 8564 layer_factory.hpp:76] Creating layer Sigmoid3
I1022 10:47:12.485966 8564 net.cpp:106] Creating Layer Sigmoid3
I1022 10:47:12.485976 8564 net.cpp:454] Sigmoid3 <- conv3
I1022 10:47:12.485986 8564 net.cpp:397] Sigmoid3 -> conv3 (in-place)
I1022 10:47:12.485997 8564 net.cpp:150] Setting up Sigmoid3
I1022 10:47:12.486008 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.486016 8564 net.cpp:165] Memory required for data: 39731600
I1022 10:47:12.486027 8564 layer_factory.hpp:76] Creating layer pool3
I1022 10:47:12.486037 8564 net.cpp:106] Creating Layer pool3
I1022 10:47:12.486045 8564 net.cpp:454] pool3 <- conv3
I1022 10:47:12.486055 8564 net.cpp:411] pool3 -> pool3
I1022 10:47:12.486081 8564 net.cpp:150] Setting up pool3
I1022 10:47:12.486088 8564 net.cpp:157] Top shape: 100 64 4 4 (102400)
I1022 10:47:12.486089 8564 net.cpp:165] Memory required for data: 40141200
I1022 10:47:12.486091 8564 layer_factory.hpp:76] Creating layer ip1
I1022 10:47:12.486099 8564 net.cpp:106] Creating Layer ip1
I1022 10:47:12.486101 8564 net.cpp:454] ip1 <- pool3
I1022 10:47:12.486104 8564 net.cpp:411] ip1 -> ip1
I1022 10:47:12.486748 8564 net.cpp:150] Setting up ip1
I1022 10:47:12.486757 8564 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:47:12.486758 8564 net.cpp:165] Memory required for data: 40145200
I1022 10:47:12.486765 8564 layer_factory.hpp:76] Creating layer loss
I1022 10:47:12.486773 8564 net.cpp:106] Creating Layer loss
I1022 10:47:12.486789 8564 net.cpp:454] loss <- ip1
I1022 10:47:12.486800 8564 net.cpp:454] loss <- label
I1022 10:47:12.486814 8564 net.cpp:411] loss -> loss
I1022 10:47:12.486829 8564 layer_factory.hpp:76] Creating layer loss
I1022 10:47:12.486891 8564 net.cpp:150] Setting up loss
I1022 10:47:12.486896 8564 net.cpp:157] Top shape: (1)
I1022 10:47:12.486897 8564 net.cpp:160] with loss weight 1
I1022 10:47:12.486915 8564 net.cpp:165] Memory required for data: 40145204
I1022 10:47:12.486918 8564 net.cpp:226] loss needs backward computation.
I1022 10:47:12.486920 8564 net.cpp:226] ip1 needs backward computation.
I1022 10:47:12.486922 8564 net.cpp:226] pool3 needs backward computation.
I1022 10:47:12.486924 8564 net.cpp:226] Sigmoid3 needs backward computation.
I1022 10:47:12.486925 8564 net.cpp:226] bn3 needs backward computation.
I1022 10:47:12.486938 8564 net.cpp:226] conv3 needs backward computation.
I1022 10:47:12.486940 8564 net.cpp:226] pool2 needs backward computation.
I1022 10:47:12.486943 8564 net.cpp:226] Sigmoid2 needs backward computation.
I1022 10:47:12.486944 8564 net.cpp:226] bn2 needs backward computation.
I1022 10:47:12.486945 8564 net.cpp:226] conv2 needs backward computation.
I1022 10:47:12.486948 8564 net.cpp:226] Sigmoid1 needs backward computation.
I1022 10:47:12.486949 8564 net.cpp:226] bn1 needs backward computation.
I1022 10:47:12.486951 8564 net.cpp:226] pool1 needs backward computation.
I1022 10:47:12.486953 8564 net.cpp:226] conv1 needs backward computation.
I1022 10:47:12.486955 8564 net.cpp:228] cifar does not need backward computation.
I1022 10:47:12.486958 8564 net.cpp:270] This network produces output loss
I1022 10:47:12.486964 8564 net.cpp:283] Network initialization done.
I1022 10:47:12.487342 8564 solver.cpp:180] Creating test net (#0) specified by net file: examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt
I1022 10:47:12.487367 8564 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer cifar
I1022 10:47:12.487373 8564 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer bn1
I1022 10:47:12.487377 8564 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer bn2
I1022 10:47:12.487380 8564 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer bn3
I1022 10:47:12.487475 8564 net.cpp:49] Initializing net from parameters:
name: "CIFAR10_full"
state {
phase: TEST
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "bn1"
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
}
}
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "bn1"
top: "bn1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "bn1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
}
}
layer {
name: "Sigmoid2"
type: "Sigmoid"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
include {
phase: TEST
}
batch_norm_param {
use_global_stats: true
}
}
layer {
name: "Sigmoid3"
type: "Sigmoid"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size: 3
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
I1022 10:47:12.487540 8564 layer_factory.hpp:76] Creating layer cifar
I1022 10:47:12.487623 8564 net.cpp:106] Creating Layer cifar
I1022 10:47:12.487627 8564 net.cpp:411] cifar -> data
I1022 10:47:12.487640 8564 net.cpp:411] cifar -> label
I1022 10:47:12.487645 8564 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto
I1022 10:47:12.488327 8568 db_lmdb.cpp:38] Opened lmdb examples/cifar10/cifar10_test_lmdb
I1022 10:47:12.488445 8564 data_layer.cpp:45] output data size: 100,3,32,32
I1022 10:47:12.493044 8564 net.cpp:150] Setting up cifar
I1022 10:47:12.493069 8564 net.cpp:157] Top shape: 100 3 32 32 (307200)
I1022 10:47:12.493072 8564 net.cpp:157] Top shape: 100 (100)
I1022 10:47:12.493075 8564 net.cpp:165] Memory required for data: 1229200
I1022 10:47:12.493079 8564 layer_factory.hpp:76] Creating layer label_cifar_1_split
I1022 10:47:12.493089 8564 net.cpp:106] Creating Layer label_cifar_1_split
I1022 10:47:12.493091 8564 net.cpp:454] label_cifar_1_split <- label
I1022 10:47:12.493096 8564 net.cpp:411] label_cifar_1_split -> label_cifar_1_split_0
I1022 10:47:12.493104 8564 net.cpp:411] label_cifar_1_split -> label_cifar_1_split_1
I1022 10:47:12.493218 8564 net.cpp:150] Setting up label_cifar_1_split
I1022 10:47:12.493223 8564 net.cpp:157] Top shape: 100 (100)
I1022 10:47:12.493226 8564 net.cpp:157] Top shape: 100 (100)
I1022 10:47:12.493237 8564 net.cpp:165] Memory required for data: 1230000
I1022 10:47:12.493238 8564 layer_factory.hpp:76] Creating layer conv1
I1022 10:47:12.493247 8564 net.cpp:106] Creating Layer conv1
I1022 10:47:12.493248 8564 net.cpp:454] conv1 <- data
I1022 10:47:12.493252 8564 net.cpp:411] conv1 -> conv1
I1022 10:47:12.493455 8564 net.cpp:150] Setting up conv1
I1022 10:47:12.493461 8564 net.cpp:157] Top shape: 100 32 32 32 (3276800)
I1022 10:47:12.493473 8564 net.cpp:165] Memory required for data: 14337200
I1022 10:47:12.493479 8564 layer_factory.hpp:76] Creating layer pool1
I1022 10:47:12.493484 8564 net.cpp:106] Creating Layer pool1
I1022 10:47:12.493485 8564 net.cpp:454] pool1 <- conv1
I1022 10:47:12.493489 8564 net.cpp:411] pool1 -> pool1
I1022 10:47:12.493526 8564 net.cpp:150] Setting up pool1
I1022 10:47:12.493530 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.493532 8564 net.cpp:165] Memory required for data: 17614000
I1022 10:47:12.493543 8564 layer_factory.hpp:76] Creating layer bn1
I1022 10:47:12.493549 8564 net.cpp:106] Creating Layer bn1
I1022 10:47:12.493551 8564 net.cpp:454] bn1 <- pool1
I1022 10:47:12.493556 8564 net.cpp:411] bn1 -> bn1
I1022 10:47:12.493680 8564 net.cpp:150] Setting up bn1
I1022 10:47:12.493685 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.493686 8564 net.cpp:165] Memory required for data: 20890800
I1022 10:47:12.493702 8564 layer_factory.hpp:76] Creating layer Sigmoid1
I1022 10:47:12.493707 8564 net.cpp:106] Creating Layer Sigmoid1
I1022 10:47:12.493710 8564 net.cpp:454] Sigmoid1 <- bn1
I1022 10:47:12.493712 8564 net.cpp:397] Sigmoid1 -> bn1 (in-place)
I1022 10:47:12.493716 8564 net.cpp:150] Setting up Sigmoid1
I1022 10:47:12.493732 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.493734 8564 net.cpp:165] Memory required for data: 24167600
I1022 10:47:12.493736 8564 layer_factory.hpp:76] Creating layer conv2
I1022 10:47:12.493741 8564 net.cpp:106] Creating Layer conv2
I1022 10:47:12.493743 8564 net.cpp:454] conv2 <- bn1
I1022 10:47:12.493747 8564 net.cpp:411] conv2 -> conv2
I1022 10:47:12.494468 8564 net.cpp:150] Setting up conv2
I1022 10:47:12.494490 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.494500 8564 net.cpp:165] Memory required for data: 27444400
I1022 10:47:12.495412 8564 layer_factory.hpp:76] Creating layer bn2
I1022 10:47:12.495437 8564 net.cpp:106] Creating Layer bn2
I1022 10:47:12.495450 8564 net.cpp:454] bn2 <- conv2
I1022 10:47:12.495460 8564 net.cpp:397] bn2 -> conv2 (in-place)
I1022 10:47:12.495592 8564 net.cpp:150] Setting up bn2
I1022 10:47:12.495607 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.495616 8564 net.cpp:165] Memory required for data: 30721200
I1022 10:47:12.495630 8564 layer_factory.hpp:76] Creating layer Sigmoid2
I1022 10:47:12.495642 8564 net.cpp:106] Creating Layer Sigmoid2
I1022 10:47:12.495652 8564 net.cpp:454] Sigmoid2 <- conv2
I1022 10:47:12.495661 8564 net.cpp:397] Sigmoid2 -> conv2 (in-place)
I1022 10:47:12.495672 8564 net.cpp:150] Setting up Sigmoid2
I1022 10:47:12.495682 8564 net.cpp:157] Top shape: 100 32 16 16 (819200)
I1022 10:47:12.495690 8564 net.cpp:165] Memory required for data: 33998000
I1022 10:47:12.495699 8564 layer_factory.hpp:76] Creating layer pool2
I1022 10:47:12.495712 8564 net.cpp:106] Creating Layer pool2
I1022 10:47:12.495720 8564 net.cpp:454] pool2 <- conv2
I1022 10:47:12.495729 8564 net.cpp:411] pool2 -> pool2
I1022 10:47:12.495950 8564 net.cpp:150] Setting up pool2
I1022 10:47:12.495965 8564 net.cpp:157] Top shape: 100 32 8 8 (204800)
I1022 10:47:12.495972 8564 net.cpp:165] Memory required for data: 34817200
I1022 10:47:12.495982 8564 layer_factory.hpp:76] Creating layer conv3
I1022 10:47:12.495996 8564 net.cpp:106] Creating Layer conv3
I1022 10:47:12.496006 8564 net.cpp:454] conv3 <- pool2
I1022 10:47:12.496016 8564 net.cpp:411] conv3 -> conv3
I1022 10:47:12.497537 8564 net.cpp:150] Setting up conv3
I1022 10:47:12.497558 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.497567 8564 net.cpp:165] Memory required for data: 36455600
I1022 10:47:12.497580 8564 layer_factory.hpp:76] Creating layer bn3
I1022 10:47:12.497591 8564 net.cpp:106] Creating Layer bn3
I1022 10:47:12.497601 8564 net.cpp:454] bn3 <- conv3
I1022 10:47:12.497611 8564 net.cpp:397] bn3 -> conv3 (in-place)
I1022 10:47:12.497735 8564 net.cpp:150] Setting up bn3
I1022 10:47:12.497748 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.497757 8564 net.cpp:165] Memory required for data: 38094000
I1022 10:47:12.497769 8564 layer_factory.hpp:76] Creating layer Sigmoid3
I1022 10:47:12.497781 8564 net.cpp:106] Creating Layer Sigmoid3
I1022 10:47:12.497788 8564 net.cpp:454] Sigmoid3 <- conv3
I1022 10:47:12.497798 8564 net.cpp:397] Sigmoid3 -> conv3 (in-place)
I1022 10:47:12.497809 8564 net.cpp:150] Setting up Sigmoid3
I1022 10:47:12.497818 8564 net.cpp:157] Top shape: 100 64 8 8 (409600)
I1022 10:47:12.497828 8564 net.cpp:165] Memory required for data: 39732400
I1022 10:47:12.497836 8564 layer_factory.hpp:76] Creating layer pool3
I1022 10:47:12.497848 8564 net.cpp:106] Creating Layer pool3
I1022 10:47:12.497855 8564 net.cpp:454] pool3 <- conv3
I1022 10:47:12.497866 8564 net.cpp:411] pool3 -> pool3
I1022 10:47:12.497891 8564 net.cpp:150] Setting up pool3
I1022 10:47:12.497903 8564 net.cpp:157] Top shape: 100 64 4 4 (102400)
I1022 10:47:12.497911 8564 net.cpp:165] Memory required for data: 40142000
I1022 10:47:12.497921 8564 layer_factory.hpp:76] Creating layer ip1
I1022 10:47:12.497934 8564 net.cpp:106] Creating Layer ip1
I1022 10:47:12.497943 8564 net.cpp:454] ip1 <- pool3
I1022 10:47:12.497953 8564 net.cpp:411] ip1 -> ip1
I1022 10:47:12.498276 8564 net.cpp:150] Setting up ip1
I1022 10:47:12.498301 8564 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:47:12.498309 8564 net.cpp:165] Memory required for data: 40146000
I1022 10:47:12.498323 8564 layer_factory.hpp:76] Creating layer ip1_ip1_0_split
I1022 10:47:12.498337 8564 net.cpp:106] Creating Layer ip1_ip1_0_split
I1022 10:47:12.498347 8564 net.cpp:454] ip1_ip1_0_split <- ip1
I1022 10:47:12.498358 8564 net.cpp:411] ip1_ip1_0_split -> ip1_ip1_0_split_0
I1022 10:47:12.498370 8564 net.cpp:411] ip1_ip1_0_split -> ip1_ip1_0_split_1
I1022 10:47:12.498404 8564 net.cpp:150] Setting up ip1_ip1_0_split
I1022 10:47:12.498416 8564 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:47:12.498426 8564 net.cpp:157] Top shape: 100 10 (1000)
I1022 10:47:12.498435 8564 net.cpp:165] Memory required for data: 40154000
I1022 10:47:12.498443 8564 layer_factory.hpp:76] Creating layer accuracy
I1022 10:47:12.498461 8564 net.cpp:106] Creating Layer accuracy
I1022 10:47:12.498471 8564 net.cpp:454] accuracy <- ip1_ip1_0_split_0
I1022 10:47:12.498479 8564 net.cpp:454] accuracy <- label_cifar_1_split_0
I1022 10:47:12.498491 8564 net.cpp:411] accuracy -> accuracy
I1022 10:47:12.498505 8564 net.cpp:150] Setting up accuracy
I1022 10:47:12.498517 8564 net.cpp:157] Top shape: (1)
I1022 10:47:12.498525 8564 net.cpp:165] Memory required for data: 40154004
I1022 10:47:12.498534 8564 layer_factory.hpp:76] Creating layer loss
I1022 10:47:12.498545 8564 net.cpp:106] Creating Layer loss
I1022 10:47:12.498554 8564 net.cpp:454] loss <- ip1_ip1_0_split_1
I1022 10:47:12.498564 8564 net.cpp:454] loss <- label_cifar_1_split_1
I1022 10:47:12.498576 8564 net.cpp:411] loss -> loss
I1022 10:47:12.498589 8564 layer_factory.hpp:76] Creating layer loss
I1022 10:47:12.498657 8564 net.cpp:150] Setting up loss
I1022 10:47:12.498670 8564 net.cpp:157] Top shape: (1)
I1022 10:47:12.498679 8564 net.cpp:160] with loss weight 1
I1022 10:47:12.498697 8564 net.cpp:165] Memory required for data: 40154008
I1022 10:47:12.498704 8564 net.cpp:226] loss needs backward computation.
I1022 10:47:12.498714 8564 net.cpp:228] accuracy does not need backward computation.
I1022 10:47:12.498724 8564 net.cpp:226] ip1_ip1_0_split needs backward computation.
I1022 10:47:12.498728 8564 net.cpp:226] ip1 needs backward computation.
I1022 10:47:12.498730 8564 net.cpp:226] pool3 needs backward computation.
I1022 10:47:12.498733 8564 net.cpp:226] Sigmoid3 needs backward computation.
I1022 10:47:12.498734 8564 net.cpp:226] bn3 needs backward computation.
I1022 10:47:12.498736 8564 net.cpp:226] conv3 needs backward computation.
I1022 10:47:12.498738 8564 net.cpp:226] pool2 needs backward computation.
I1022 10:47:12.498739 8564 net.cpp:226] Sigmoid2 needs backward computation.
I1022 10:47:12.498741 8564 net.cpp:226] bn2 needs backward computation.
I1022 10:47:12.498744 8564 net.cpp:226] conv2 needs backward computation.
I1022 10:47:12.498745 8564 net.cpp:226] Sigmoid1 needs backward computation.
I1022 10:47:12.498746 8564 net.cpp:226] bn1 needs backward computation.
I1022 10:47:12.498749 8564 net.cpp:226] pool1 needs backward computation.
I1022 10:47:12.498750 8564 net.cpp:226] conv1 needs backward computation.
I1022 10:47:12.498754 8564 net.cpp:228] label_cifar_1_split does not need backward computation.
I1022 10:47:12.498761 8564 net.cpp:228] cifar does not need backward computation.
I1022 10:47:12.498764 8564 net.cpp:270] This network produces output accuracy
I1022 10:47:12.498765 8564 net.cpp:270] This network produces output loss
I1022 10:47:12.498775 8564 net.cpp:283] Network initialization done.
I1022 10:47:12.498841 8564 solver.cpp:59] Solver scaffolding done.
I1022 10:47:12.499248 8564 caffe.cpp:212] Starting Optimization
I1022 10:47:12.499254 8564 solver.cpp:287] Solving CIFAR10_full
I1022 10:47:12.499256 8564 solver.cpp:288] Learning Rate Policy: multistep
I1022 10:47:12.585510 8564 solver.cpp:236] Iteration 0, loss = 2.32097
I1022 10:47:12.585546 8564 solver.cpp:252] Train net output #0: loss = 2.32097 (* 1 = 2.32097 loss)
I1022 10:47:12.585561 8564 sgd_solver.cpp:106] Iteration 0, lr = 0.001
I1022 10:47:23.672806 8564 solver.cpp:236] Iteration 100, loss = 2.00252
I1022 10:47:23.672839 8564 solver.cpp:252] Train net output #0: loss = 2.00252 (* 1 = 2.00252 loss)
I1022 10:47:23.672844 8564 sgd_solver.cpp:106] Iteration 100, lr = 0.001
I1022 10:47:34.785794 8564 solver.cpp:236] Iteration 200, loss = 1.91384
I1022 10:47:34.785820 8564 solver.cpp:252] Train net output #0: loss = 1.91384 (* 1 = 1.91384 loss)
I1022 10:47:34.785825 8564 sgd_solver.cpp:106] Iteration 200, lr = 0.001
I1022 10:47:46.001471 8564 solver.cpp:236] Iteration 300, loss = 1.67202
I1022 10:47:46.001549 8564 solver.cpp:252] Train net output #0: loss = 1.67202 (* 1 = 1.67202 loss)
I1022 10:47:46.001554 8564 sgd_solver.cpp:106] Iteration 300, lr = 0.001
I1022 10:47:57.287842 8564 solver.cpp:236] Iteration 400, loss = 1.54156
I1022 10:47:57.287878 8564 solver.cpp:252] Train net output #0: loss = 1.54156 (* 1 = 1.54156 loss)
I1022 10:47:57.287883 8564 sgd_solver.cpp:106] Iteration 400, lr = 0.001
I1022 10:48:08.542645 8564 solver.cpp:236] Iteration 500, loss = 1.74834
I1022 10:48:08.542682 8564 solver.cpp:252] Train net output #0: loss = 1.74834 (* 1 = 1.74834 loss)
I1022 10:48:08.542690 8564 sgd_solver.cpp:106] Iteration 500, lr = 0.001
I1022 10:48:19.854049 8564 solver.cpp:236] Iteration 600, loss = 1.54865
I1022 10:48:19.854112 8564 solver.cpp:252] Train net output #0: loss = 1.54865 (* 1 = 1.54865 loss)
I1022 10:48:19.854118 8564 sgd_solver.cpp:106] Iteration 600, lr = 0.001
I1022 10:48:30.431449 8564 solver.cpp:236] Iteration 700, loss = 1.63565
I1022 10:48:30.431483 8564 solver.cpp:252] Train net output #0: loss = 1.63565 (* 1 = 1.63565 loss)
I1022 10:48:30.431488 8564 sgd_solver.cpp:106] Iteration 700, lr = 0.001
I1022 10:48:41.589982 8564 solver.cpp:236] Iteration 800, loss = 1.46557
I1022 10:48:41.590018 8564 solver.cpp:252] Train net output #0: loss = 1.46557 (* 1 = 1.46557 loss)
I1022 10:48:41.590024 8564 sgd_solver.cpp:106] Iteration 800, lr = 0.001
I1022 10:48:52.775574 8564 solver.cpp:236] Iteration 900, loss = 1.28691
I1022 10:48:52.775684 8564 solver.cpp:252] Train net output #0: loss = 1.28691 (* 1 = 1.28691 loss)
I1022 10:48:52.775691 8564 sgd_solver.cpp:106] Iteration 900, lr = 0.001
I1022 10:49:03.904574 8564 solver.cpp:340] Iteration 1000, Testing net (#0)
I1022 10:49:09.307780 8564 solver.cpp:408] Test net output #0: accuracy = 0.2611
I1022 10:49:09.307806 8564 solver.cpp:408] Test net output #1: loss = 2.35121 (* 1 = 2.35121 loss)
I1022 10:49:09.368461 8564 solver.cpp:236] Iteration 1000, loss = 1.56718
I1022 10:49:09.368495 8564 solver.cpp:252] Train net output #0: loss = 1.56718 (* 1 = 1.56718 loss)
I1022 10:49:09.368501 8564 sgd_solver.cpp:106] Iteration 1000, lr = 0.001
I1022 10:49:20.522966 8564 solver.cpp:236] Iteration 1100, loss = 1.40465
I1022 10:49:20.523023 8564 solver.cpp:252] Train net output #0: loss = 1.40465 (* 1 = 1.40465 loss)
I1022 10:49:20.523033 8564 sgd_solver.cpp:106] Iteration 1100, lr = 0.001
I1022 10:49:31.700487 8564 solver.cpp:236] Iteration 1200, loss = 1.51424
I1022 10:49:31.700564 8564 solver.cpp:252] Train net output #0: loss = 1.51424 (* 1 = 1.51424 loss)
I1022 10:49:31.700572 8564 sgd_solver.cpp:106] Iteration 1200, lr = 0.001
I1022 10:49:42.873749 8564 solver.cpp:236] Iteration 1300, loss = 1.32168
I1022 10:49:42.873785 8564 solver.cpp:252] Train net output #0: loss = 1.32168 (* 1 = 1.32168 loss)
I1022 10:49:42.873790 8564 sgd_solver.cpp:106] Iteration 1300, lr = 0.001
I1022 10:49:54.031334 8564 solver.cpp:236] Iteration 1400, loss = 1.10562
I1022 10:49:54.031373 8564 solver.cpp:252] Train net output #0: loss = 1.10562 (* 1 = 1.10562 loss)
I1022 10:49:54.031379 8564 sgd_solver.cpp:106] Iteration 1400, lr = 0.001
I1022 10:50:05.151456 8564 solver.cpp:236] Iteration 1500, loss = 1.4976
I1022 10:50:05.151557 8564 solver.cpp:252] Train net output #0: loss = 1.4976 (* 1 = 1.4976 loss)
I1022 10:50:05.151562 8564 sgd_solver.cpp:106] Iteration 1500, lr = 0.001
I1022 10:50:15.751418 8564 solver.cpp:236] Iteration 1600, loss = 1.26973
I1022 10:50:15.751456 8564 solver.cpp:252] Train net output #0: loss = 1.26973 (* 1 = 1.26973 loss)
I1022 10:50:15.751461 8564 sgd_solver.cpp:106] Iteration 1600, lr = 0.001
I1022 10:50:26.941982 8564 solver.cpp:236] Iteration 1700, loss = 1.37544
I1022 10:50:26.942031 8564 solver.cpp:252] Train net output #0: loss = 1.37544 (* 1 = 1.37544 loss)
I1022 10:50:26.942044 8564 sgd_solver.cpp:106] Iteration 1700, lr = 0.001
I1022 10:50:38.099313 8564 solver.cpp:236] Iteration 1800, loss = 1.21621
I1022 10:50:38.099403 8564 solver.cpp:252] Train net output #0: loss = 1.21621 (* 1 = 1.21621 loss)
I1022 10:50:38.099417 8564 sgd_solver.cpp:106] Iteration 1800, lr = 0.001
I1022 10:50:49.302039 8564 solver.cpp:236] Iteration 1900, loss = 1.06617
I1022 10:50:49.302084 8564 solver.cpp:252] Train net output #0: loss = 1.06617 (* 1 = 1.06617 loss)
I1022 10:50:49.302098 8564 sgd_solver.cpp:106] Iteration 1900, lr = 0.001
I1022 10:51:00.498013 8564 solver.cpp:340] Iteration 2000, Testing net (#0)
I1022 10:51:05.954262 8564 solver.cpp:408] Test net output #0: accuracy = 0.433
I1022 10:51:05.954301 8564 solver.cpp:408] Test net output #1: loss = 1.59692 (* 1 = 1.59692 loss)
I1022 10:51:06.020668 8564 solver.cpp:236] Iteration 2000, loss = 1.40223
I1022 10:51:06.020705 8564 solver.cpp:252] Train net output #0: loss = 1.40223 (* 1 = 1.40223 loss)
I1022 10:51:06.020714 8564 sgd_solver.cpp:106] Iteration 2000, lr = 0.001
I1022 10:51:17.194422 8564 solver.cpp:236] Iteration 2100, loss = 1.20316
I1022 10:51:17.194530 8564 solver.cpp:252] Train net output #0: loss = 1.20316 (* 1 = 1.20316 loss)
I1022 10:51:17.194536 8564 sgd_solver.cpp:106] Iteration 2100, lr = 0.001
I1022 10:51:28.343837 8564 solver.cpp:236] Iteration 2200, loss = 1.28942
I1022 10:51:28.343873 8564 solver.cpp:252] Train net output #0: loss = 1.28942 (* 1 = 1.28942 loss)
I1022 10:51:28.343879 8564 sgd_solver.cpp:106] Iteration 2200, lr = 0.001
I1022 10:51:39.587674 8564 solver.cpp:236] Iteration 2300, loss = 1.1539
I1022 10:51:39.587702 8564 solver.cpp:252] Train net output #0: loss = 1.1539 (* 1 = 1.1539 loss)
I1022 10:51:39.587708 8564 sgd_solver.cpp:106] Iteration 2300, lr = 0.001
I1022 10:51:50.779501 8564 solver.cpp:236] Iteration 2400, loss = 1.02162
I1022 10:51:50.779579 8564 solver.cpp:252] Train net output #0: loss = 1.02162 (* 1 = 1.02162 loss)
I1022 10:51:50.779587 8564 sgd_solver.cpp:106] Iteration 2400, lr = 0.001
I1022 10:52:01.780865 8564 solver.cpp:236] Iteration 2500, loss = 1.34051
I1022 10:52:01.780903 8564 solver.cpp:252] Train net output #0: loss = 1.34051 (* 1 = 1.34051 loss)
I1022 10:52:01.780910 8564 sgd_solver.cpp:106] Iteration 2500, lr = 0.001
I1022 10:52:12.687367 8564 solver.cpp:236] Iteration 2600, loss = 1.14407
I1022 10:52:12.687405 8564 solver.cpp:252] Train net output #0: loss = 1.14407 (* 1 = 1.14407 loss)
I1022 10:52:12.687412 8564 sgd_solver.cpp:106] Iteration 2600, lr = 0.001
I1022 10:52:24.006764 8564 solver.cpp:236] Iteration 2700, loss = 1.22465
I1022 10:52:24.006851 8564 solver.cpp:252] Train net output #0: loss = 1.22465 (* 1 = 1.22465 loss)
I1022 10:52:24.006857 8564 sgd_solver.cpp:106] Iteration 2700, lr = 0.001
I1022 10:52:35.180367 8564 solver.cpp:236] Iteration 2800, loss = 1.11153
I1022 10:52:35.180408 8564 solver.cpp:252] Train net output #0: loss = 1.11153 (* 1 = 1.11153 loss)
I1022 10:52:35.180413 8564 sgd_solver.cpp:106] Iteration 2800, lr = 0.001
I1022 10:52:46.357211 8564 solver.cpp:236] Iteration 2900, loss = 0.991401
I1022 10:52:46.357241 8564 solver.cpp:252] Train net output #0: loss = 0.991401 (* 1 = 0.991401 loss)
I1022 10:52:46.357249 8564 sgd_solver.cpp:106] Iteration 2900, lr = 0.001
I1022 10:52:57.411651 8564 solver.cpp:340] Iteration 3000, Testing net (#0)
I1022 10:53:02.781502 8564 solver.cpp:408] Test net output #0: accuracy = 0.4682
I1022 10:53:02.781536 8564 solver.cpp:408] Test net output #1: loss = 1.44699 (* 1 = 1.44699 loss)
I1022 10:53:02.842921 8564 solver.cpp:236] Iteration 3000, loss = 1.31119
I1022 10:53:02.842955 8564 solver.cpp:252] Train net output #0: loss = 1.31119 (* 1 = 1.31119 loss)
I1022 10:53:02.842960 8564 sgd_solver.cpp:106] Iteration 3000, lr = 0.001
I1022 10:53:13.969511 8564 solver.cpp:236] Iteration 3100, loss = 1.10167
I1022 10:53:13.969535 8564 solver.cpp:252] Train net output #0: loss = 1.10167 (* 1 = 1.10167 loss)
I1022 10:53:13.969540 8564 sgd_solver.cpp:106] Iteration 3100, lr = 0.001
I1022 10:53:25.092070 8564 solver.cpp:236] Iteration 3200, loss = 1.17905
I1022 10:53:25.092097 8564 solver.cpp:252] Train net output #0: loss = 1.17905 (* 1 = 1.17905 loss)
I1022 10:53:25.092103 8564 sgd_solver.cpp:106] Iteration 3200, lr = 0.001
I1022 10:53:36.228162 8564 solver.cpp:236] Iteration 3300, loss = 1.07725
I1022 10:53:36.228250 8564 solver.cpp:252] Train net output #0: loss = 1.07725 (* 1 = 1.07725 loss)
I1022 10:53:36.228255 8564 sgd_solver.cpp:106] Iteration 3300, lr = 0.001
I1022 10:53:47.350854 8564 solver.cpp:236] Iteration 3400, loss = 0.958348
I1022 10:53:47.350880 8564 solver.cpp:252] Train net output #0: loss = 0.958348 (* 1 = 0.958348 loss)
I1022 10:53:47.350886 8564 sgd_solver.cpp:106] Iteration 3400, lr = 0.001
I1022 10:53:57.929698 8564 solver.cpp:236] Iteration 3500, loss = 1.28172
I1022 10:53:57.929733 8564 solver.cpp:252] Train net output #0: loss = 1.28172 (* 1 = 1.28172 loss)
I1022 10:53:57.929736 8564 sgd_solver.cpp:106] Iteration 3500, lr = 0.001
I1022 10:54:09.044430 8564 solver.cpp:236] Iteration 3600, loss = 1.06781
I1022 10:54:09.044541 8564 solver.cpp:252] Train net output #0: loss = 1.06781 (* 1 = 1.06781 loss)
I1022 10:54:09.044548 8564 sgd_solver.cpp:106] Iteration 3600, lr = 0.001
I1022 10:54:20.169020 8564 solver.cpp:236] Iteration 3700, loss = 1.14847
I1022 10:54:20.169054 8564 solver.cpp:252] Train net output #0: loss = 1.14847 (* 1 = 1.14847 loss)
I1022 10:54:20.169057 8564 sgd_solver.cpp:106] Iteration 3700, lr = 0.001
I1022 10:54:31.299976 8564 solver.cpp:236] Iteration 3800, loss = 1.0454
I1022 10:54:31.300004 8564 solver.cpp:252] Train net output #0: loss = 1.0454 (* 1 = 1.0454 loss)
I1022 10:54:31.300009 8564 sgd_solver.cpp:106] Iteration 3800, lr = 0.001
I1022 10:54:42.414695 8564 solver.cpp:236] Iteration 3900, loss = 0.932119
I1022 10:54:42.414767 8564 solver.cpp:252] Train net output #0: loss = 0.932119 (* 1 = 0.932119 loss)
I1022 10:54:42.414773 8564 sgd_solver.cpp:106] Iteration 3900, lr = 0.001
I1022 10:54:53.471880 8564 solver.cpp:340] Iteration 4000, Testing net (#0)
I1022 10:54:59.028641 8564 solver.cpp:408] Test net output #0: accuracy = 0.5081
I1022 10:54:59.028719 8564 solver.cpp:408] Test net output #1: loss = 1.34308 (* 1 = 1.34308 loss)
I1022 10:54:59.091145 8564 solver.cpp:236] Iteration 4000, loss = 1.25277
I1022 10:54:59.091172 8564 solver.cpp:252] Train net output #0: loss = 1.25277 (* 1 = 1.25277 loss)
I1022 10:54:59.091177 8564 sgd_solver.cpp:106] Iteration 4000, lr = 0.001
I1022 10:55:10.318908 8564 solver.cpp:236] Iteration 4100, loss = 1.0367
I1022 10:55:10.318945 8564 solver.cpp:252] Train net output #0: loss = 1.0367 (* 1 = 1.0367 loss)
I1022 10:55:10.318951 8564 sgd_solver.cpp:106] Iteration 4100, lr = 0.001
I1022 10:55:21.530535 8564 solver.cpp:236] Iteration 4200, loss = 1.12556
I1022 10:55:21.530627 8564 solver.cpp:252] Train net output #0: loss = 1.12556 (* 1 = 1.12556 loss)
I1022 10:55:21.530642 8564 sgd_solver.cpp:106] Iteration 4200, lr = 0.001
I1022 10:55:32.687075 8564 solver.cpp:236] Iteration 4300, loss = 1.01821
I1022 10:55:32.687113 8564 solver.cpp:252] Train net output #0: loss = 1.01821 (* 1 = 1.01821 loss)
I1022 10:55:32.687119 8564 sgd_solver.cpp:106] Iteration 4300, lr = 0.001
I1022 10:55:43.568413 8564 solver.cpp:236] Iteration 4400, loss = 0.908041
I1022 10:55:43.568449 8564 solver.cpp:252] Train net output #0: loss = 0.908041 (* 1 = 0.908041 loss)
I1022 10:55:43.568455 8564 sgd_solver.cpp:106] Iteration 4400, lr = 0.001
I1022 10:55:54.459111 8564 solver.cpp:236] Iteration 4500, loss = 1.2231
I1022 10:55:54.459228 8564 solver.cpp:252] Train net output #0: loss = 1.2231 (* 1 = 1.2231 loss)
I1022 10:55:54.459234 8564 sgd_solver.cpp:106] Iteration 4500, lr = 0.001
I1022 10:56:05.629084 8564 solver.cpp:236] Iteration 4600, loss = 1.00847
I1022 10:56:05.629120 8564 solver.cpp:252] Train net output #0: loss = 1.00847 (* 1 = 1.00847 loss)
I1022 10:56:05.629127 8564 sgd_solver.cpp:106] Iteration 4600, lr = 0.001
I1022 10:56:16.782022 8564 solver.cpp:236] Iteration 4700, loss = 1.10768
I1022 10:56:16.782059 8564 solver.cpp:252] Train net output #0: loss = 1.10768 (* 1 = 1.10768 loss)
I1022 10:56:16.782064 8564 sgd_solver.cpp:106] Iteration 4700, lr = 0.001
I1022 10:56:27.917898 8564 solver.cpp:236] Iteration 4800, loss = 0.993115
I1022 10:56:27.917973 8564 solver.cpp:252] Train net output #0: loss = 0.993115 (* 1 = 0.993115 loss)
I1022 10:56:27.917979 8564 sgd_solver.cpp:106] Iteration 4800, lr = 0.001
I1022 10:56:39.055897 8564 solver.cpp:236] Iteration 4900, loss = 0.88821
I1022 10:56:39.055954 8564 solver.cpp:252] Train net output #0: loss = 0.88821 (* 1 = 0.88821 loss)
I1022 10:56:39.055968 8564 sgd_solver.cpp:106] Iteration 4900, lr = 0.001
I1022 10:56:50.117326 8564 solver.cpp:340] Iteration 5000, Testing net (#0)
I1022 10:56:55.494091 8564 solver.cpp:408] Test net output #0: accuracy = 0.5277
I1022 10:56:55.494128 8564 solver.cpp:408] Test net output #1: loss = 1.28907 (* 1 = 1.28907 loss)
I1022 10:56:55.553964 8564 solver.cpp:236] Iteration 5000, loss = 1.19346
I1022 10:56:55.553992 8564 solver.cpp:252] Train net output #0: loss = 1.19346 (* 1 = 1.19346 loss)
I1022 10:56:55.553997 8564 sgd_solver.cpp:106] Iteration 5000, lr = 0.001
I1022 10:57:06.736750 8564 solver.cpp:236] Iteration 5100, loss = 0.981668
I1022 10:57:06.736826 8564 solver.cpp:252] Train net output #0: loss = 0.981668 (* 1 = 0.981668 loss)
I1022 10:57:06.736840 8564 sgd_solver.cpp:106] Iteration 5100, lr = 0.001
I1022 10:57:17.931282 8564 solver.cpp:236] Iteration 5200, loss = 1.09377
I1022 10:57:17.931318 8564 solver.cpp:252] Train net output #0: loss = 1.09377 (* 1 = 1.09377 loss)
I1022 10:57:17.931324 8564 sgd_solver.cpp:106] Iteration 5200, lr = 0.001
I1022 10:57:29.089121 8564 solver.cpp:236] Iteration 5300, loss = 0.970221
I1022 10:57:29.089157 8564 solver.cpp:252] Train net output #0: loss = 0.970221 (* 1 = 0.970221 loss)
I1022 10:57:29.089164 8564 sgd_solver.cpp:106] Iteration 5300, lr = 0.001
I1022 10:57:39.725975 8564 solver.cpp:236] Iteration 5400, loss = 0.870625
I1022 10:57:39.726053 8564 solver.cpp:252] Train net output #0: loss = 0.870625 (* 1 = 0.870625 loss)
I1022 10:57:39.726068 8564 sgd_solver.cpp:106] Iteration 5400, lr = 0.001
I1022 10:57:50.877043 8564 solver.cpp:236] Iteration 5500, loss = 1.16349
I1022 10:57:50.877079 8564 solver.cpp:252] Train net output #0: loss = 1.16349 (* 1 = 1.16349 loss)
I1022 10:57:50.877084 8564 sgd_solver.cpp:106] Iteration 5500, lr = 0.001
I1022 10:58:02.045374 8564 solver.cpp:236] Iteration 5600, loss = 0.957349
I1022 10:58:02.045411 8564 solver.cpp:252] Train net output #0: loss = 0.957349 (* 1 = 0.957349 loss)
I1022 10:58:02.045418 8564 sgd_solver.cpp:106] Iteration 5600, lr = 0.001
I1022 10:58:13.218036 8564 solver.cpp:236] Iteration 5700, loss = 1.07998
I1022 10:58:13.218133 8564 solver.cpp:252] Train net output #0: loss = 1.07998 (* 1 = 1.07998 loss)
I1022 10:58:13.218139 8564 sgd_solver.cpp:106] Iteration 5700, lr = 0.001
I1022 10:58:24.429282 8564 solver.cpp:236] Iteration 5800, loss = 0.94905
I1022 10:58:24.429318 8564 solver.cpp:252] Train net output #0: loss = 0.94905 (* 1 = 0.94905 loss)
I1022 10:58:24.429324 8564 sgd_solver.cpp:106] Iteration 5800, lr = 0.001
I1022 10:58:35.542251 8564 solver.cpp:236] Iteration 5900, loss = 0.854363
I1022 10:58:35.542279 8564 solver.cpp:252] Train net output #0: loss = 0.854363 (* 1 = 0.854363 loss)
I1022 10:58:35.542284 8564 sgd_solver.cpp:106] Iteration 5900, lr = 0.001
I1022 10:58:46.627142 8564 solver.cpp:340] Iteration 6000, Testing net (#0)
I1022 10:58:51.998172 8564 solver.cpp:408] Test net output #0: accuracy = 0.5365
I1022 10:58:51.998198 8564 solver.cpp:408] Test net output #1: loss = 1.28651 (* 1 = 1.28651 loss)
I1022 10:58:52.059741 8564 solver.cpp:236] Iteration 6000, loss = 1.13377
I1022 10:58:52.059770 8564 solver.cpp:252] Train net output #0: loss = 1.13377 (* 1 = 1.13377 loss)
I1022 10:58:52.059775 8564 sgd_solver.cpp:106] Iteration 6000, lr = 0.001
I1022 10:59:03.276649 8564 solver.cpp:236] Iteration 6100, loss = 0.935908
I1022 10:59:03.276685 8564 solver.cpp:252] Train net output #0: loss = 0.935908 (* 1 = 0.935908 loss)
I1022 10:59:03.276690 8564 sgd_solver.cpp:106] Iteration 6100, lr = 0.001
I1022 10:59:14.417157 8564 solver.cpp:236] Iteration 6200, loss = 1.06605
I1022 10:59:14.417193 8564 solver.cpp:252] Train net output #0: loss = 1.06605 (* 1 = 1.06605 loss)
I1022 10:59:14.417198 8564 sgd_solver.cpp:106] Iteration 6200, lr = 0.001
I1022 10:59:25.146883 8564 solver.cpp:236] Iteration 6300, loss = 0.930281
I1022 10:59:25.146986 8564 solver.cpp:252] Train net output #0: loss = 0.930281 (* 1 = 0.930281 loss)
I1022 10:59:25.146991 8564 sgd_solver.cpp:106] Iteration 6300, lr = 0.001
I1022 10:59:36.180222 8564 solver.cpp:236] Iteration 6400, loss = 0.837334
I1022 10:59:36.180259 8564 solver.cpp:252] Train net output #0: loss = 0.837334 (* 1 = 0.837334 loss)
I1022 10:59:36.180265 8564 sgd_solver.cpp:106] Iteration 6400, lr = 0.001
I1022 10:59:47.442986 8564 solver.cpp:236] Iteration 6500, loss = 1.10648
I1022 10:59:47.443020 8564 solver.cpp:252] Train net output #0: loss = 1.10648 (* 1 = 1.10648 loss)
I1022 10:59:47.443027 8564 sgd_solver.cpp:106] Iteration 6500, lr = 0.001
I1022 10:59:58.569824 8564 solver.cpp:236] Iteration 6600, loss = 0.917222
I1022 10:59:58.569929 8564 solver.cpp:252] Train net output #0: loss = 0.917222 (* 1 = 0.917222 loss)
I1022 10:59:58.569936 8564 sgd_solver.cpp:106] Iteration 6600, lr = 0.001
I1022 11:00:09.693030 8564 solver.cpp:236] Iteration 6700, loss = 1.05244
I1022 11:00:09.693065 8564 solver.cpp:252] Train net output #0: loss = 1.05244 (* 1 = 1.05244 loss)
I1022 11:00:09.693071 8564 sgd_solver.cpp:106] Iteration 6700, lr = 0.001
I1022 11:00:20.911839 8564 solver.cpp:236] Iteration 6800, loss = 0.912773
I1022 11:00:20.911865 8564 solver.cpp:252] Train net output #0: loss = 0.912773 (* 1 = 0.912773 loss)
I1022 11:00:20.911870 8564 sgd_solver.cpp:106] Iteration 6800, lr = 0.001
I1022 11:00:32.065438 8564 solver.cpp:236] Iteration 6900, loss = 0.821233
I1022 11:00:32.065534 8564 solver.cpp:252] Train net output #0: loss = 0.821233 (* 1 = 0.821233 loss)
I1022 11:00:32.065548 8564 sgd_solver.cpp:106] Iteration 6900, lr = 0.001
I1022 11:00:43.133543 8564 solver.cpp:340] Iteration 7000, Testing net (#0)
I1022 11:00:48.502912 8564 solver.cpp:408] Test net output #0: accuracy = 0.5371
I1022 11:00:48.502943 8564 solver.cpp:408] Test net output #1: loss = 1.31288 (* 1 = 1.31288 loss)
I1022 11:00:48.566998 8564 solver.cpp:236] Iteration 7000, loss = 1.08283
I1022 11:00:48.567031 8564 solver.cpp:252] Train net output #0: loss = 1.08283 (* 1 = 1.08283 loss)
I1022 11:00:48.567037 8564 sgd_solver.cpp:106] Iteration 7000, lr = 0.001
I1022 11:00:59.689734 8564 solver.cpp:236] Iteration 7100, loss = 0.900495
I1022 11:00:59.689760 8564 solver.cpp:252] Train net output #0: loss = 0.900495 (* 1 = 0.900495 loss)
I1022 11:00:59.689766 8564 sgd_solver.cpp:106] Iteration 7100, lr = 0.001
I1022 11:01:10.820148 8564 solver.cpp:236] Iteration 7200, loss = 1.04202
I1022 11:01:10.820230 8564 solver.cpp:252] Train net output #0: loss = 1.04202 (* 1 = 1.04202 loss)
I1022 11:01:10.820243 8564 sgd_solver.cpp:106] Iteration 7200, lr = 0.001
I1022 11:01:21.419250 8564 solver.cpp:236] Iteration 7300, loss = 0.897834
I1022 11:01:21.419277 8564 solver.cpp:252] Train net output #0: loss = 0.897834 (* 1 = 0.897834 loss)
I1022 11:01:21.419283 8564 sgd_solver.cpp:106] Iteration 7300, lr = 0.001
I1022 11:01:32.553884 8564 solver.cpp:236] Iteration 7400, loss = 0.807126
I1022 11:01:32.553920 8564 solver.cpp:252] Train net output #0: loss = 0.807126 (* 1 = 0.807126 loss)
I1022 11:01:32.553926 8564 sgd_solver.cpp:106] Iteration 7400, lr = 0.001
I1022 11:01:43.689129 8564 solver.cpp:236] Iteration 7500, loss = 1.06264
I1022 11:01:43.689229 8564 solver.cpp:252] Train net output #0: loss = 1.06264 (* 1 = 1.06264 loss)
I1022 11:01:43.689244 8564 sgd_solver.cpp:106] Iteration 7500, lr = 0.001
I1022 11:01:54.822784 8564 solver.cpp:236] Iteration 7600, loss = 0.886083
I1022 11:01:54.822820 8564 solver.cpp:252] Train net output #0: loss = 0.886083 (* 1 = 0.886083 loss)
I1022 11:01:54.822826 8564 sgd_solver.cpp:106] Iteration 7600, lr = 0.001
I1022 11:02:05.959888 8564 solver.cpp:236] Iteration 7700, loss = 1.03317
I1022 11:02:05.959926 8564 solver.cpp:252] Train net output #0: loss = 1.03317 (* 1 = 1.03317 loss)
I1022 11:02:05.959931 8564 sgd_solver.cpp:106] Iteration 7700, lr = 0.001
I1022 11:02:17.088526 8564 solver.cpp:236] Iteration 7800, loss = 0.88481
I1022 11:02:17.088672 8564 solver.cpp:252] Train net output #0: loss = 0.88481 (* 1 = 0.88481 loss)
I1022 11:02:17.088678 8564 sgd_solver.cpp:106] Iteration 7800, lr = 0.001
I1022 11:02:28.228469 8564 solver.cpp:236] Iteration 7900, loss = 0.794562
I1022 11:02:28.228497 8564 solver.cpp:252] Train net output #0: loss = 0.794562 (* 1 = 0.794562 loss)
I1022 11:02:28.228502 8564 sgd_solver.cpp:106] Iteration 7900, lr = 0.001
I1022 11:02:39.296113 8564 solver.cpp:340] Iteration 8000, Testing net (#0)
I1022 11:02:44.658010 8564 solver.cpp:408] Test net output #0: accuracy = 0.5319
I1022 11:02:44.658043 8564 solver.cpp:408] Test net output #1: loss = 1.34801 (* 1 = 1.34801 loss)
I1022 11:02:44.722715 8564 solver.cpp:236] Iteration 8000, loss = 1.0467
I1022 11:02:44.722740 8564 solver.cpp:252] Train net output #0: loss = 1.0467 (* 1 = 1.0467 loss)
I1022 11:02:44.722746 8564 sgd_solver.cpp:106] Iteration 8000, lr = 0.001
I1022 11:02:55.872843 8564 solver.cpp:236] Iteration 8100, loss = 0.87316
I1022 11:02:55.872930 8564 solver.cpp:252] Train net output #0: loss = 0.87316 (* 1 = 0.87316 loss)
I1022 11:02:55.872936 8564 sgd_solver.cpp:106] Iteration 8100, lr = 0.001
I1022 11:03:06.519069 8564 solver.cpp:236] Iteration 8200, loss = 1.027
I1022 11:03:06.519096 8564 solver.cpp:252] Train net output #0: loss = 1.027 (* 1 = 1.027 loss)
I1022 11:03:06.519103 8564 sgd_solver.cpp:106] Iteration 8200, lr = 0.001
I1022 11:03:17.697319 8564 solver.cpp:236] Iteration 8300, loss = 0.875373
I1022 11:03:17.697355 8564 solver.cpp:252] Train net output #0: loss = 0.875373 (* 1 = 0.875373 loss)
I1022 11:03:17.697361 8564 sgd_solver.cpp:106] Iteration 8300, lr = 0.001
I1022 11:03:28.966516 8564 solver.cpp:236] Iteration 8400, loss = 0.784034
I1022 11:03:28.966579 8564 solver.cpp:252] Train net output #0: loss = 0.784034 (* 1 = 0.784034 loss)
I1022 11:03:28.966585 8564 sgd_solver.cpp:106] Iteration 8400, lr = 0.001
I1022 11:03:40.141294 8564 solver.cpp:236] Iteration 8500, loss = 1.03405
I1022 11:03:40.141330 8564 solver.cpp:252] Train net output #0: loss = 1.03405 (* 1 = 1.03405 loss)
I1022 11:03:40.141336 8564 sgd_solver.cpp:106] Iteration 8500, lr = 0.001
I1022 11:03:51.266589 8564 solver.cpp:236] Iteration 8600, loss = 0.861715
I1022 11:03:51.266628 8564 solver.cpp:252] Train net output #0: loss = 0.861715 (* 1 = 0.861715 loss)
I1022 11:03:51.266633 8564 sgd_solver.cpp:106] Iteration 8600, lr = 0.001
I1022 11:04:02.381254 8564 solver.cpp:236] Iteration 8700, loss = 1.02292
I1022 11:04:02.381361 8564 solver.cpp:252] Train net output #0: loss = 1.02292 (* 1 = 1.02292 loss)
I1022 11:04:02.381366 8564 sgd_solver.cpp:106] Iteration 8700, lr = 0.001
I1022 11:04:13.497334 8564 solver.cpp:236] Iteration 8800, loss = 0.867321
I1022 11:04:13.497371 8564 solver.cpp:252] Train net output #0: loss = 0.867321 (* 1 = 0.867321 loss)
I1022 11:04:13.497377 8564 sgd_solver.cpp:106] Iteration 8800, lr = 0.001
I1022 11:04:24.643878 8564 solver.cpp:236] Iteration 8900, loss = 0.774756
I1022 11:04:24.643906 8564 solver.cpp:252] Train net output #0: loss = 0.774756 (* 1 = 0.774756 loss)
I1022 11:04:24.643911 8564 sgd_solver.cpp:106] Iteration 8900, lr = 0.001
I1022 11:04:35.720197 8564 solver.cpp:340] Iteration 9000, Testing net (#0)
I1022 11:04:41.095862 8564 solver.cpp:408] Test net output #0: accuracy = 0.5283
I1022 11:04:41.095897 8564 solver.cpp:408] Test net output #1: loss = 1.37123 (* 1 = 1.37123 loss)
I1022 11:04:41.155951 8564 solver.cpp:236] Iteration 9000, loss = 1.02307
I1022 11:04:41.155977 8564 solver.cpp:252] Train net output #0: loss = 1.02307 (* 1 = 1.02307 loss)
I1022 11:04:41.155982 8564 sgd_solver.cpp:106] Iteration 9000, lr = 0.001
I1022 11:04:52.246351 8564 solver.cpp:236] Iteration 9100, loss = 0.851682
I1022 11:04:52.246389 8564 solver.cpp:252] Train net output #0: loss = 0.851682 (* 1 = 0.851682 loss)
I1022 11:04:52.246397 8564 sgd_solver.cpp:106] Iteration 9100, lr = 0.001
I1022 11:05:02.972261 8564 solver.cpp:236] Iteration 9200, loss = 1.02035
I1022 11:05:02.972296 8564 solver.cpp:252] Train net output #0: loss = 1.02035 (* 1 = 1.02035 loss)
I1022 11:05:02.972302 8564 sgd_solver.cpp:106] Iteration 9200, lr = 0.001
I1022 11:05:14.125594 8564 solver.cpp:236] Iteration 9300, loss = 0.859746
I1022 11:05:14.125676 8564 solver.cpp:252] Train net output #0: loss = 0.859746 (* 1 = 0.859746 loss)
I1022 11:05:14.125691 8564 sgd_solver.cpp:106] Iteration 9300, lr = 0.001
I1022 11:05:25.269296 8564 solver.cpp:236] Iteration 9400, loss = 0.766938
I1022 11:05:25.269353 8564 solver.cpp:252] Train net output #0: loss = 0.766938 (* 1 = 0.766938 loss)
I1022 11:05:25.269366 8564 sgd_solver.cpp:106] Iteration 9400, lr = 0.001
I1022 11:05:36.419596 8564 solver.cpp:236] Iteration 9500, loss = 1.01155
I1022 11:05:36.419631 8564 solver.cpp:252] Train net output #0: loss = 1.01155 (* 1 = 1.01155 loss)
I1022 11:05:36.419637 8564 sgd_solver.cpp:106] Iteration 9500, lr = 0.001
I1022 11:05:47.633718 8564 solver.cpp:236] Iteration 9600, loss = 0.843507
I1022 11:05:47.633806 8564 solver.cpp:252] Train net output #0: loss = 0.843507 (* 1 = 0.843507 loss)
I1022 11:05:47.633812 8564 sgd_solver.cpp:106] Iteration 9600, lr = 0.001
I1022 11:05:58.928779 8564 solver.cpp:236] Iteration 9700, loss = 1.01803
I1022 11:05:58.928814 8564 solver.cpp:252] Train net output #0: loss = 1.01803 (* 1 = 1.01803 loss)
I1022 11:05:58.928819 8564 sgd_solver.cpp:106] Iteration 9700, lr = 0.001
I1022 11:06:10.038100 8564 solver.cpp:236] Iteration 9800, loss = 0.852406
I1022 11:06:10.038135 8564 solver.cpp:252] Train net output #0: loss = 0.852406 (* 1 = 0.852406 loss)
I1022 11:06:10.038141 8564 sgd_solver.cpp:106] Iteration 9800, lr = 0.001
I1022 11:06:21.140257 8564 solver.cpp:236] Iteration 9900, loss = 0.7607
I1022 11:06:21.140364 8564 solver.cpp:252] Train net output #0: loss = 0.7607 (* 1 = 0.7607 loss)
I1022 11:06:21.140370 8564 sgd_solver.cpp:106] Iteration 9900, lr = 0.001
I1022 11:06:32.189060 8564 solver.cpp:340] Iteration 10000, Testing net (#0)
I1022 11:06:37.536795 8564 solver.cpp:408] Test net output #0: accuracy = 0.5362
I1022 11:06:37.536830 8564 solver.cpp:408] Test net output #1: loss = 1.35428 (* 1 = 1.35428 loss)
I1022 11:06:37.596839 8564 solver.cpp:236] Iteration 10000, loss = 0.999949
I1022 11:06:37.596855 8564 solver.cpp:252] Train net output #0: loss = 0.999949 (* 1 = 0.999949 loss)
I1022 11:06:37.596860 8564 sgd_solver.cpp:106] Iteration 10000, lr = 0.001
I1022 11:06:48.214331 8564 solver.cpp:236] Iteration 10100, loss = 0.836307
I1022 11:06:48.214370 8564 solver.cpp:252] Train net output #0: loss = 0.836307 (* 1 = 0.836307 loss)
I1022 11:06:48.214375 8564 sgd_solver.cpp:106] Iteration 10100, lr = 0.001
I1022 11:06:59.703713 8564 solver.cpp:236] Iteration 10200, loss = 1.01446
I1022 11:06:59.703809 8564 solver.cpp:252] Train net output #0: loss = 1.01446 (* 1 = 1.01446 loss)
I1022 11:06:59.703815 8564 sgd_solver.cpp:106] Iteration 10200, lr = 0.001
I1022 11:07:10.990681 8564 solver.cpp:236] Iteration 10300, loss = 0.844063
I1022 11:07:10.990718 8564 solver.cpp:252] Train net output #0: loss = 0.844063 (* 1 = 0.844063 loss)
I1022 11:07:10.990725 8564 sgd_solver.cpp:106] Iteration 10300, lr = 0.001
I1022 11:07:22.325598 8564 solver.cpp:236] Iteration 10400, loss = 0.756001
I1022 11:07:22.325623 8564 solver.cpp:252] Train net output #0: loss = 0.756001 (* 1 = 0.756001 loss)
I1022 11:07:22.325629 8564 sgd_solver.cpp:106] Iteration 10400, lr = 0.001
I1022 11:07:33.638324 8564 solver.cpp:236] Iteration 10500, loss = 0.989357
I1022 11:07:33.638408 8564 solver.cpp:252] Train net output #0: loss = 0.989357 (* 1 = 0.989357 loss)
I1022 11:07:33.638413 8564 sgd_solver.cpp:106] Iteration 10500, lr = 0.001
I1022 11:07:44.758133 8564 solver.cpp:236] Iteration 10600, loss = 0.830034
I1022 11:07:44.758170 8564 solver.cpp:252] Train net output #0: loss = 0.830034 (* 1 = 0.830034 loss)
I1022 11:07:44.758175 8564 sgd_solver.cpp:106] Iteration 10600, lr = 0.001
I1022 11:07:55.887380 8564 solver.cpp:236] Iteration 10700, loss = 1.01092
I1022 11:07:55.887404 8564 solver.cpp:252] Train net output #0: loss = 1.01092 (* 1 = 1.01092 loss)
I1022 11:07:55.887409 8564 sgd_solver.cpp:106] Iteration 10700, lr = 0.001
I1022 11:08:07.015717 8564 solver.cpp:236] Iteration 10800, loss = 0.836887
I1022 11:08:07.015804 8564 solver.cpp:252] Train net output #0: loss = 0.836887 (* 1 = 0.836887 loss)
I1022 11:08:07.015810 8564 sgd_solver.cpp:106] Iteration 10800, lr = 0.001
I1022 11:08:18.133378 8564 solver.cpp:236] Iteration 10900, loss = 0.750266
I1022 11:08:18.133406 8564 solver.cpp:252] Train net output #0: loss = 0.750266 (* 1 = 0.750266 loss)
I1022 11:08:18.133412 8564 sgd_solver.cpp:106] Iteration 10900, lr = 0.001
I1022 11:08:29.220507 8564 solver.cpp:340] Iteration 11000, Testing net (#0)
I1022 11:08:34.411219 8564 solver.cpp:408] Test net output #0: accuracy = 0.5407
I1022 11:08:34.411245 8564 solver.cpp:408] Test net output #1: loss = 1.3382 (* 1 = 1.3382 loss)
I1022 11:08:34.469854 8564 solver.cpp:236] Iteration 11000, loss = 0.979585
I1022 11:08:34.469892 8564 solver.cpp:252] Train net output #0: loss = 0.979585 (* 1 = 0.979585 loss)
I1022 11:08:34.469898 8564 sgd_solver.cpp:106] Iteration 11000, lr = 0.001
I1022 11:08:45.416860 8564 solver.cpp:236] Iteration 11100, loss = 0.825167
I1022 11:08:45.416954 8564 solver.cpp:252] Train net output #0: loss = 0.825167 (* 1 = 0.825167 loss)
I1022 11:08:45.416968 8564 sgd_solver.cpp:106] Iteration 11100, lr = 0.001
I1022 11:08:56.604718 8564 solver.cpp:236] Iteration 11200, loss = 1.00726
I1022 11:08:56.604755 8564 solver.cpp:252] Train net output #0: loss = 1.00726 (* 1 = 1.00726 loss)
I1022 11:08:56.604761 8564 sgd_solver.cpp:106] Iteration 11200, lr = 0.001
I1022 11:09:07.828991 8564 solver.cpp:236] Iteration 11300, loss = 0.829028
I1022 11:09:07.829046 8564 solver.cpp:252] Train net output #0: loss = 0.829028 (* 1 = 0.829028 loss)
I1022 11:09:07.829058 8564 sgd_solver.cpp:106] Iteration 11300, lr = 0.001
I1022 11:09:19.032860 8564 solver.cpp:236] Iteration 11400, loss = 0.746158
I1022 11:09:19.032960 8564 solver.cpp:252] Train net output #0: loss = 0.746158 (* 1 = 0.746158 loss)
I1022 11:09:19.032966 8564 sgd_solver.cpp:106] Iteration 11400, lr = 0.001
I1022 11:09:30.211771 8564 solver.cpp:236] Iteration 11500, loss = 0.970272
I1022 11:09:30.211805 8564 solver.cpp:252] Train net output #0: loss = 0.970272 (* 1 = 0.970272 loss)
I1022 11:09:30.211810 8564 sgd_solver.cpp:106] Iteration 11500, lr = 0.001
I1022 11:09:41.342051 8564 solver.cpp:236] Iteration 11600, loss = 0.821736
I1022 11:09:41.342087 8564 solver.cpp:252] Train net output #0: loss = 0.821736 (* 1 = 0.821736 loss)
I1022 11:09:41.342092 8564 sgd_solver.cpp:106] Iteration 11600, lr = 0.001
I1022 11:09:52.452190 8564 solver.cpp:236] Iteration 11700, loss = 1.00449
I1022 11:09:52.452307 8564 solver.cpp:252] Train net output #0: loss = 1.00449 (* 1 = 1.00449 loss)
I1022 11:09:52.452313 8564 sgd_solver.cpp:106] Iteration 11700, lr = 0.001
I1022 11:10:03.584844 8564 solver.cpp:236] Iteration 11800, loss = 0.822382
I1022 11:10:03.584882 8564 solver.cpp:252] Train net output #0: loss = 0.822382 (* 1 = 0.822382 loss)
I1022 11:10:03.584887 8564 sgd_solver.cpp:106] Iteration 11800, lr = 0.001
I1022 11:10:14.722473 8564 solver.cpp:236] Iteration 11900, loss = 0.742509
I1022 11:10:14.722512 8564 solver.cpp:252] Train net output #0: loss = 0.742509 (* 1 = 0.742509 loss)
I1022 11:10:14.722517 8564 sgd_solver.cpp:106] Iteration 11900, lr = 0.001
I1022 11:10:25.526278 8564 solver.cpp:340] Iteration 12000, Testing net (#0)
I1022 11:10:31.367588 8564 solver.cpp:408] Test net output #0: accuracy = 0.5481
I1022 11:10:31.367638 8564 solver.cpp:408] Test net output #1: loss = 1.31082 (* 1 = 1.31082 loss)
I1022 11:10:31.427549 8564 solver.cpp:236] Iteration 12000, loss = 0.961934
I1022 11:10:31.427590 8564 solver.cpp:252] Train net output #0: loss = 0.961934 (* 1 = 0.961934 loss)
I1022 11:10:31.427603 8564 sgd_solver.cpp:106] Iteration 12000, lr = 0.001
I1022 11:10:42.697722 8564 solver.cpp:236] Iteration 12100, loss = 0.818081
I1022 11:10:42.697759 8564 solver.cpp:252] Train net output #0: loss = 0.818081 (* 1 = 0.818081 loss)
I1022 11:10:42.697765 8564 sgd_solver.cpp:106] Iteration 12100, lr = 0.001
I1022 11:10:53.799393 8564 solver.cpp:236] Iteration 12200, loss = 1.0012
I1022 11:10:53.799429 8564 solver.cpp:252] Train net output #0: loss = 1.0012 (* 1 = 1.0012 loss)
I1022 11:10:53.799435 8564 sgd_solver.cpp:106] Iteration 12200, lr = 0.001
I1022 11:11:04.892750 8564 solver.cpp:236] Iteration 12300, loss = 0.815408
I1022 11:11:04.892848 8564 solver.cpp:252] Train net output #0: loss = 0.815408 (* 1 = 0.815408 loss)
I1022 11:11:04.892863 8564 sgd_solver.cpp:106] Iteration 12300, lr = 0.001
I1022 11:11:15.972353 8564 solver.cpp:236] Iteration 12400, loss = 0.738728
I1022 11:11:15.972391 8564 solver.cpp:252] Train net output #0: loss = 0.738728 (* 1 = 0.738728 loss)
I1022 11:11:15.972396 8564 sgd_solver.cpp:106] Iteration 12400, lr = 0.001
I1022 11:11:27.199014 8564 solver.cpp:236] Iteration 12500, loss = 0.954308
I1022 11:11:27.199050 8564 solver.cpp:252] Train net output #0: loss = 0.954308 (* 1 = 0.954308 loss)
I1022 11:11:27.199056 8564 sgd_solver.cpp:106] Iteration 12500, lr = 0.001
I1022 11:11:38.343364 8564 solver.cpp:236] Iteration 12600, loss = 0.814748
I1022 11:11:38.343442 8564 solver.cpp:252] Train net output #0: loss = 0.814748 (* 1 = 0.814748 loss)
I1022 11:11:38.343448 8564 sgd_solver.cpp:106] Iteration 12600, lr = 0.001
I1022 11:11:49.477916 8564 solver.cpp:236] Iteration 12700, loss = 0.998423
I1022 11:11:49.477952 8564 solver.cpp:252] Train net output #0: loss = 0.998423 (* 1 = 0.998423 loss)
I1022 11:11:49.477958 8564 sgd_solver.cpp:106] Iteration 12700, lr = 0.001
I1022 11:12:00.666256 8564 solver.cpp:236] Iteration 12800, loss = 0.808999
I1022 11:12:00.666290 8564 solver.cpp:252] Train net output #0: loss = 0.808999 (* 1 = 0.808999 loss)
I1022 11:12:00.666296 8564 sgd_solver.cpp:106] Iteration 12800, lr = 0.001
I1022 11:12:11.909124 8564 solver.cpp:236] Iteration 12900, loss = 0.735762
I1022 11:12:11.909216 8564 solver.cpp:252] Train net output #0: loss = 0.735762 (* 1 = 0.735762 loss)
I1022 11:12:11.909224 8564 sgd_solver.cpp:106] Iteration 12900, lr = 0.001
I1022 11:12:22.464344 8564 solver.cpp:340] Iteration 13000, Testing net (#0)
I1022 11:12:27.836658 8564 solver.cpp:408] Test net output #0: accuracy = 0.5545
I1022 11:12:27.836694 8564 solver.cpp:408] Test net output #1: loss = 1.29025 (* 1 = 1.29025 loss)
I1022 11:12:27.902407 8564 solver.cpp:236] Iteration 13000, loss = 0.947423
I1022 11:12:27.902444 8564 solver.cpp:252] Train net output #0: loss = 0.947423 (* 1 = 0.947423 loss)
I1022 11:12:27.902451 8564 sgd_solver.cpp:106] Iteration 13000, lr = 0.001
I1022 11:12:39.058715 8564 solver.cpp:236] Iteration 13100, loss = 0.811838
I1022 11:12:39.058750 8564 solver.cpp:252] Train net output #0: loss = 0.811838 (* 1 = 0.811838 loss)
I1022 11:12:39.058756 8564 sgd_solver.cpp:106] Iteration 13100, lr = 0.001
I1022 11:12:50.252737 8564 solver.cpp:236] Iteration 13200, loss = 0.996014
I1022 11:12:50.252817 8564 solver.cpp:252] Train net output #0: loss = 0.996014 (* 1 = 0.996014 loss)
I1022 11:12:50.252825 8564 sgd_solver.cpp:106] Iteration 13200, lr = 0.001
I1022 11:13:01.459527 8564 solver.cpp:236] Iteration 13300, loss = 0.803881
I1022 11:13:01.459558 8564 solver.cpp:252] Train net output #0: loss = 0.803881 (* 1 = 0.803881 loss)
I1022 11:13:01.459563 8564 sgd_solver.cpp:106] Iteration 13300, lr = 0.001
I1022 11:13:12.829118 8564 solver.cpp:236] Iteration 13400, loss = 0.733281
I1022 11:13:12.829152 8564 solver.cpp:252] Train net output #0: loss = 0.733281 (* 1 = 0.733281 loss)
I1022 11:13:12.829157 8564 sgd_solver.cpp:106] Iteration 13400, lr = 0.001
I1022 11:13:23.933081 8564 solver.cpp:236] Iteration 13500, loss = 0.940187
I1022 11:13:23.933150 8564 solver.cpp:252] Train net output #0: loss = 0.940187 (* 1 = 0.940187 loss)
I1022 11:13:23.933156 8564 sgd_solver.cpp:106] Iteration 13500, lr = 0.001
I1022 11:13:35.036816 8564 solver.cpp:236] Iteration 13600, loss = 0.808968
I1022 11:13:35.036855 8564 solver.cpp:252] Train net output #0: loss = 0.808968 (* 1 = 0.808968 loss)
I1022 11:13:35.036860 8564 sgd_solver.cpp:106] Iteration 13600, lr = 0.001
I1022 11:13:46.142202 8564 solver.cpp:236] Iteration 13700, loss = 0.9926
I1022 11:13:46.142241 8564 solver.cpp:252] Train net output #0: loss = 0.9926 (* 1 = 0.9926 loss)
I1022 11:13:46.142246 8564 sgd_solver.cpp:106] Iteration 13700, lr = 0.001
I1022 11:13:57.239224 8564 solver.cpp:236] Iteration 13800, loss = 0.798586
I1022 11:13:57.239305 8564 solver.cpp:252] Train net output #0: loss = 0.798586 (* 1 = 0.798586 loss)
I1022 11:13:57.239311 8564 sgd_solver.cpp:106] Iteration 13800, lr = 0.001
I1022 11:14:08.067500 8564 solver.cpp:236] Iteration 13900, loss = 0.731287
I1022 11:14:08.067539 8564 solver.cpp:252] Train net output #0: loss = 0.731287 (* 1 = 0.731287 loss)
I1022 11:14:08.067545 8564 sgd_solver.cpp:106] Iteration 13900, lr = 0.001
I1022 11:14:19.069638 8564 solver.cpp:340] Iteration 14000, Testing net (#0)
I1022 11:14:24.406167 8564 solver.cpp:408] Test net output #0: accuracy = 0.5616
I1022 11:14:24.406201 8564 solver.cpp:408] Test net output #1: loss = 1.26744 (* 1 = 1.26744 loss)
I1022 11:14:24.467852 8564 solver.cpp:236] Iteration 14000, loss = 0.933317
I1022 11:14:24.467885 8564 solver.cpp:252] Train net output #0: loss = 0.933317 (* 1 = 0.933317 loss)
I1022 11:14:24.467892 8564 sgd_solver.cpp:106] Iteration 14000, lr = 0.001
I1022 11:14:35.680349 8564 solver.cpp:236] Iteration 14100, loss = 0.80619
I1022 11:14:35.680428 8564 solver.cpp:252] Train net output #0: loss = 0.80619 (* 1 = 0.80619 loss)
I1022 11:14:35.680444 8564 sgd_solver.cpp:106] Iteration 14100, lr = 0.001
I1022 11:14:47.013340 8564 solver.cpp:236] Iteration 14200, loss = 0.989585
I1022 11:14:47.013378 8564 solver.cpp:252] Train net output #0: loss = 0.989585 (* 1 = 0.989585 loss)
I1022 11:14:47.013384 8564 sgd_solver.cpp:106] Iteration 14200, lr = 0.001
I1022 11:14:58.274118 8564 solver.cpp:236] Iteration 14300, loss = 0.793235
I1022 11:14:58.274148 8564 solver.cpp:252] Train net output #0: loss = 0.793235 (* 1 = 0.793235 loss)
I1022 11:14:58.274154 8564 sgd_solver.cpp:106] Iteration 14300, lr = 0.001
I1022 11:15:09.438632 8564 solver.cpp:236] Iteration 14400, loss = 0.729674
I1022 11:15:09.438715 8564 solver.cpp:252] Train net output #0: loss = 0.729674 (* 1 = 0.729674 loss)
I1022 11:15:09.438720 8564 sgd_solver.cpp:106] Iteration 14400, lr = 0.001
I1022 11:15:20.577165 8564 solver.cpp:236] Iteration 14500, loss = 0.926367
I1022 11:15:20.577200 8564 solver.cpp:252] Train net output #0: loss = 0.926367 (* 1 = 0.926367 loss)
I1022 11:15:20.577204 8564 sgd_solver.cpp:106] Iteration 14500, lr = 0.001
I1022 11:15:31.728301 8564 solver.cpp:236] Iteration 14600, loss = 0.804492
I1022 11:15:31.728334 8564 solver.cpp:252] Train net output #0: loss = 0.804492 (* 1 = 0.804492 loss)
I1022 11:15:31.728339 8564 sgd_solver.cpp:106] Iteration 14600, lr = 0.001
I1022 11:15:42.894465 8564 solver.cpp:236] Iteration 14700, loss = 0.985793
I1022 11:15:42.894574 8564 solver.cpp:252] Train net output #0: loss = 0.985793 (* 1 = 0.985793 loss)
I1022 11:15:42.894588 8564 sgd_solver.cpp:106] Iteration 14700, lr = 0.001
I1022 11:15:54.076133 8564 solver.cpp:236] Iteration 14800, loss = 0.788194
I1022 11:15:54.076169 8564 solver.cpp:252] Train net output #0: loss = 0.788194 (* 1 = 0.788194 loss)
I1022 11:15:54.076174 8564 sgd_solver.cpp:106] Iteration 14800, lr = 0.001
I1022 11:16:04.727046 8564 solver.cpp:236] Iteration 14900, loss = 0.727778
I1022 11:16:04.727082 8564 solver.cpp:252] Train net output #0: loss = 0.727778 (* 1 = 0.727778 loss)
I1022 11:16:04.727089 8564 sgd_solver.cpp:106] Iteration 14900, lr = 0.001
I1022 11:16:15.877264 8564 solver.cpp:340] Iteration 15000, Testing net (#0)
I1022 11:16:21.270452 8564 solver.cpp:408] Test net output #0: accuracy = 0.5694
I1022 11:16:21.270490 8564 solver.cpp:408] Test net output #1: loss = 1.24658 (* 1 = 1.24658 loss)
I1022 11:16:21.331501 8564 solver.cpp:236] Iteration 15000, loss = 0.91994
I1022 11:16:21.331537 8564 solver.cpp:252] Train net output #0: loss = 0.91994 (* 1 = 0.91994 loss)
I1022 11:16:21.331542 8564 sgd_solver.cpp:106] Iteration 15000, lr = 0.001
I1022 11:16:32.508613 8564 solver.cpp:236] Iteration 15100, loss = 0.802954
I1022 11:16:32.508647 8564 solver.cpp:252] Train net output #0: loss = 0.802954 (* 1 = 0.802954 loss)
I1022 11:16:32.508652 8564 sgd_solver.cpp:106] Iteration 15100, lr = 0.001
I1022 11:16:43.667011 8564 solver.cpp:236] Iteration 15200, loss = 0.982127
I1022 11:16:43.667048 8564 solver.cpp:252] Train net output #0: loss = 0.982127 (* 1 = 0.982127 loss)
I1022 11:16:43.667053 8564 sgd_solver.cpp:106] Iteration 15200, lr = 0.001
I1022 11:16:54.787158 8564 solver.cpp:236] Iteration 15300, loss = 0.784026
I1022 11:16:54.787225 8564 solver.cpp:252] Train net output #0: loss = 0.784026 (* 1 = 0.784026 loss)
I1022 11:16:54.787230 8564 sgd_solver.cpp:106] Iteration 15300, lr = 0.001
I1022 11:17:08.938073 8564 solver.cpp:236] Iteration 15400, loss = 0.726201
I1022 11:17:08.938112 8564 solver.cpp:252] Train net output #0: loss = 0.726201 (* 1 = 0.726201 loss)
I1022 11:17:08.938118 8564 sgd_solver.cpp:106] Iteration 15400, lr = 0.001
I1022 11:17:25.518004 8564 solver.cpp:236] Iteration 15500, loss = 0.914209
I1022 11:17:25.518091 8564 solver.cpp:252] Train net output #0: loss = 0.914209 (* 1 = 0.914209 loss)
I1022 11:17:25.518105 8564 sgd_solver.cpp:106] Iteration 15500, lr = 0.001
I1022 11:17:41.969053 8564 solver.cpp:236] Iteration 15600, loss = 0.800772
I1022 11:17:41.969089 8564 solver.cpp:252] Train net output #0: loss = 0.800772 (* 1 = 0.800772 loss)
I1022 11:17:41.969094 8564 sgd_solver.cpp:106] Iteration 15600, lr = 0.001
I1022 11:17:58.458911 8564 solver.cpp:236] Iteration 15700, loss = 0.979371
I1022 11:17:58.458995 8564 solver.cpp:252] Train net output #0: loss = 0.979371 (* 1 = 0.979371 loss)
I1022 11:17:58.459012 8564 sgd_solver.cpp:106] Iteration 15700, lr = 0.001
I1022 11:18:14.624419 8564 solver.cpp:236] Iteration 15800, loss = 0.780081
I1022 11:18:14.624439 8564 solver.cpp:252] Train net output #0: loss = 0.780081 (* 1 = 0.780081 loss)
I1022 11:18:14.624446 8564 sgd_solver.cpp:106] Iteration 15800, lr = 0.001
I1022 11:18:31.274682 8564 solver.cpp:236] Iteration 15900, loss = 0.724637
I1022 11:18:31.274773 8564 solver.cpp:252] Train net output #0: loss = 0.724637 (* 1 = 0.724637 loss)
I1022 11:18:31.274785 8564 sgd_solver.cpp:106] Iteration 15900, lr = 0.001
I1022 11:18:47.855376 8564 solver.cpp:340] Iteration 16000, Testing net (#0)
I1022 11:18:55.940703 8564 solver.cpp:408] Test net output #0: accuracy = 0.5739
I1022 11:18:55.940752 8564 solver.cpp:408] Test net output #1: loss = 1.23572 (* 1 = 1.23572 loss)
I1022 11:18:56.042127 8564 solver.cpp:236] Iteration 16000, loss = 0.907968
I1022 11:18:56.042177 8564 solver.cpp:252] Train net output #0: loss = 0.907968 (* 1 = 0.907968 loss)
I1022 11:18:56.042189 8564 sgd_solver.cpp:106] Iteration 16000, lr = 0.001
I1022 11:19:12.616708 8564 solver.cpp:236] Iteration 16100, loss = 0.798735
I1022 11:19:12.616796 8564 solver.cpp:252] Train net output #0: loss = 0.798735 (* 1 = 0.798735 loss)
I1022 11:19:12.616802 8564 sgd_solver.cpp:106] Iteration 16100, lr = 0.001
I1022 11:19:29.076385 8564 solver.cpp:236] Iteration 16200, loss = 0.975644
I1022 11:19:29.076413 8564 solver.cpp:252] Train net output #0: loss = 0.975644 (* 1 = 0.975644 loss)
I1022 11:19:29.076418 8564 sgd_solver.cpp:106] Iteration 16200, lr = 0.001
I1022 11:19:45.614449 8564 solver.cpp:236] Iteration 16300, loss = 0.776063
I1022 11:19:45.614531 8564 solver.cpp:252] Train net output #0: loss = 0.776063 (* 1 = 0.776063 loss)
I1022 11:19:45.614544 8564 sgd_solver.cpp:106] Iteration 16300, lr = 0.001
I1022 11:20:01.939510 8564 solver.cpp:236] Iteration 16400, loss = 0.722728
I1022 11:20:01.939538 8564 solver.cpp:252] Train net output #0: loss = 0.722728 (* 1 = 0.722728 loss)
I1022 11:20:01.939543 8564 sgd_solver.cpp:106] Iteration 16400, lr = 0.001
I1022 11:20:18.522455 8564 solver.cpp:236] Iteration 16500, loss = 0.902115
I1022 11:20:18.522598 8564 solver.cpp:252] Train net output #0: loss = 0.902115 (* 1 = 0.902115 loss)
I1022 11:20:18.522627 8564 sgd_solver.cpp:106] Iteration 16500, lr = 0.001
I1022 11:20:35.039793 8564 solver.cpp:236] Iteration 16600, loss = 0.796508
I1022 11:20:35.039821 8564 solver.cpp:252] Train net output #0: loss = 0.796508 (* 1 = 0.796508 loss)
I1022 11:20:35.039827 8564 sgd_solver.cpp:106] Iteration 16600, lr = 0.001
I1022 11:20:51.746162 8564 solver.cpp:236] Iteration 16700, loss = 0.972412
I1022 11:20:51.746352 8564 solver.cpp:252] Train net output #0: loss = 0.972412 (* 1 = 0.972412 loss)
I1022 11:20:51.746366 8564 sgd_solver.cpp:106] Iteration 16700, lr = 0.001
I1022 11:21:07.782186 8564 solver.cpp:236] Iteration 16800, loss = 0.772334
I1022 11:21:07.782222 8564 solver.cpp:252] Train net output #0: loss = 0.772334 (* 1 = 0.772334 loss)
I1022 11:21:07.782228 8564 sgd_solver.cpp:106] Iteration 16800, lr = 0.001
I1022 11:21:24.191629 8564 solver.cpp:236] Iteration 16900, loss = 0.721072
I1022 11:21:24.191701 8564 solver.cpp:252] Train net output #0: loss = 0.721072 (* 1 = 0.721072 loss)
I1022 11:21:24.191707 8564 sgd_solver.cpp:106] Iteration 16900, lr = 0.001
I1022 11:21:40.489735 8564 solver.cpp:340] Iteration 17000, Testing net (#0)
I1022 11:21:48.526864 8564 solver.cpp:408] Test net output #0: accuracy = 0.5768
I1022 11:21:48.526900 8564 solver.cpp:408] Test net output #1: loss = 1.22559 (* 1 = 1.22559 loss)
I1022 11:21:48.621413 8564 solver.cpp:236] Iteration 17000, loss = 0.895788
I1022 11:21:48.621464 8564 solver.cpp:252] Train net output #0: loss = 0.895788 (* 1 = 0.895788 loss)
I1022 11:21:48.621474 8564 sgd_solver.cpp:106] Iteration 17000, lr = 0.001
I1022 11:22:05.027693 8564 solver.cpp:236] Iteration 17100, loss = 0.794307
I1022 11:22:05.027760 8564 solver.cpp:252] Train net output #0: loss = 0.794307 (* 1 = 0.794307 loss)
I1022 11:22:05.027773 8564 sgd_solver.cpp:106] Iteration 17100, lr = 0.001
I1022 11:22:21.424247 8564 solver.cpp:236] Iteration 17200, loss = 0.969369
I1022 11:22:21.424275 8564 solver.cpp:252] Train net output #0: loss = 0.969369 (* 1 = 0.969369 loss)
I1022 11:22:21.424280 8564 sgd_solver.cpp:106] Iteration 17200, lr = 0.001
I1022 11:22:38.082953 8564 solver.cpp:236] Iteration 17300, loss = 0.768728
I1022 11:22:38.083216 8564 solver.cpp:252] Train net output #0: loss = 0.768728 (* 1 = 0.768728 loss)
I1022 11:22:38.083228 8564 sgd_solver.cpp:106] Iteration 17300, lr = 0.001
I1022 11:22:54.353318 8564 solver.cpp:236] Iteration 17400, loss = 0.719676
I1022 11:22:54.353346 8564 solver.cpp:252] Train net output #0: loss = 0.719676 (* 1 = 0.719676 loss)
I1022 11:22:54.353351 8564 sgd_solver.cpp:106] Iteration 17400, lr = 0.001
I1022 11:23:10.783258 8564 solver.cpp:236] Iteration 17500, loss = 0.890551
I1022 11:23:10.783357 8564 solver.cpp:252] Train net output #0: loss = 0.890551 (* 1 = 0.890551 loss)
I1022 11:23:10.783365 8564 sgd_solver.cpp:106] Iteration 17500, lr = 0.001
I1022 11:23:27.494442 8564 solver.cpp:236] Iteration 17600, loss = 0.791581
I1022 11:23:27.494472 8564 solver.cpp:252] Train net output #0: loss = 0.791581 (* 1 = 0.791581 loss)
I1022 11:23:27.494477 8564 sgd_solver.cpp:106] Iteration 17600, lr = 0.001
I1022 11:23:43.656267 8564 solver.cpp:236] Iteration 17700, loss = 0.966551
I1022 11:23:43.656338 8564 solver.cpp:252] Train net output #0: loss = 0.966551 (* 1 = 0.966551 loss)
I1022 11:23:43.656349 8564 sgd_solver.cpp:106] Iteration 17700, lr = 0.001
I1022 11:24:00.333573 8564 solver.cpp:236] Iteration 17800, loss = 0.765147
I1022 11:24:00.333632 8564 solver.cpp:252] Train net output #0: loss = 0.765147 (* 1 = 0.765147 loss)
I1022 11:24:00.333641 8564 sgd_solver.cpp:106] Iteration 17800, lr = 0.001
I1022 11:24:17.216095 8564 solver.cpp:236] Iteration 17900, loss = 0.718045
I1022 11:24:17.216167 8564 solver.cpp:252] Train net output #0: loss = 0.718045 (* 1 = 0.718045 loss)
I1022 11:24:17.216176 8564 sgd_solver.cpp:106] Iteration 17900, lr = 0.001
I1022 11:24:33.808125 8564 solver.cpp:340] Iteration 18000, Testing net (#0)
I1022 11:24:41.843014 8564 solver.cpp:408] Test net output #0: accuracy = 0.5824
I1022 11:24:41.843049 8564 solver.cpp:408] Test net output #1: loss = 1.20896 (* 1 = 1.20896 loss)
I1022 11:24:41.935667 8564 solver.cpp:236] Iteration 18000, loss = 0.885244
I1022 11:24:41.935703 8564 solver.cpp:252] Train net output #0: loss = 0.885244 (* 1 = 0.885244 loss)
I1022 11:24:41.935706 8564 sgd_solver.cpp:106] Iteration 18000, lr = 0.001
I1022 11:24:56.184695 8564 solver.cpp:236] Iteration 18100, loss = 0.788617
I1022 11:24:56.184777 8564 solver.cpp:252] Train net output #0: loss = 0.788617 (* 1 = 0.788617 loss)
I1022 11:24:56.184783 8564 sgd_solver.cpp:106] Iteration 18100, lr = 0.001
I1022 11:25:07.470768 8564 solver.cpp:236] Iteration 18200, loss = 0.964112
I1022 11:25:07.470803 8564 solver.cpp:252] Train net output #0: loss = 0.964112 (* 1 = 0.964112 loss)
I1022 11:25:07.470809 8564 sgd_solver.cpp:106] Iteration 18200, lr = 0.001
I1022 11:25:18.818291 8564 solver.cpp:236] Iteration 18300, loss = 0.761697
I1022 11:25:18.818331 8564 solver.cpp:252] Train net output #0: loss = 0.761697 (* 1 = 0.761697 loss)
I1022 11:25:18.818338 8564 sgd_solver.cpp:106] Iteration 18300, lr = 0.001
I1022 11:25:29.977766 8564 solver.cpp:236] Iteration 18400, loss = 0.71644
I1022 11:25:29.977860 8564 solver.cpp:252] Train net output #0: loss = 0.71644 (* 1 = 0.71644 loss)
I1022 11:25:29.977866 8564 sgd_solver.cpp:106] Iteration 18400, lr = 0.001
I1022 11:25:41.235028 8564 solver.cpp:236] Iteration 18500, loss = 0.879611
I1022 11:25:41.235064 8564 solver.cpp:252] Train net output #0: loss = 0.879611 (* 1 = 0.879611 loss)
I1022 11:25:41.235069 8564 sgd_solver.cpp:106] Iteration 18500, lr = 0.001
I1022 11:25:52.447937 8564 solver.cpp:236] Iteration 18600, loss = 0.785579
I1022 11:25:52.447963 8564 solver.cpp:252] Train net output #0: loss = 0.785579 (* 1 = 0.785579 loss)
I1022 11:25:52.447969 8564 sgd_solver.cpp:106] Iteration 18600, lr = 0.001
I1022 11:26:03.143340 8564 solver.cpp:236] Iteration 18700, loss = 0.96214
I1022 11:26:03.143430 8564 solver.cpp:252] Train net output #0: loss = 0.96214 (* 1 = 0.96214 loss)
I1022 11:26:03.143436 8564 sgd_solver.cpp:106] Iteration 18700, lr = 0.001
I1022 11:26:14.272738 8564 solver.cpp:236] Iteration 18800, loss = 0.758737
I1022 11:26:14.272775 8564 solver.cpp:252] Train net output #0: loss = 0.758737 (* 1 = 0.758737 loss)
I1022 11:26:14.272780 8564 sgd_solver.cpp:106] Iteration 18800, lr = 0.001
I1022 11:26:25.401067 8564 solver.cpp:236] Iteration 18900, loss = 0.714998
I1022 11:26:25.401103 8564 solver.cpp:252] Train net output #0: loss = 0.714998 (* 1 = 0.714998 loss)
I1022 11:26:25.401108 8564 sgd_solver.cpp:106] Iteration 18900, lr = 0.001
I1022 11:26:36.441985 8564 solver.cpp:340] Iteration 19000, Testing net (#0)
I1022 11:26:41.808882 8564 solver.cpp:408] Test net output #0: accuracy = 0.5863
I1022 11:26:41.808917 8564 solver.cpp:408] Test net output #1: loss = 1.19419 (* 1 = 1.19419 loss)
I1022 11:26:41.871855 8564 solver.cpp:236] Iteration 19000, loss = 0.874707
I1022 11:26:41.871889 8564 solver.cpp:252] Train net output #0: loss = 0.874707 (* 1 = 0.874707 loss)
I1022 11:26:41.871896 8564 sgd_solver.cpp:106] Iteration 19000, lr = 0.001
I1022 11:26:53.007905 8564 solver.cpp:236] Iteration 19100, loss = 0.783215
I1022 11:26:53.007941 8564 solver.cpp:252] Train net output #0: loss = 0.783215 (* 1 = 0.783215 loss)
I1022 11:26:53.007946 8564 sgd_solver.cpp:106] Iteration 19100, lr = 0.001
I1022 11:27:04.149597 8564 solver.cpp:236] Iteration 19200, loss = 0.95961
I1022 11:27:04.149632 8564 solver.cpp:252] Train net output #0: loss = 0.95961 (* 1 = 0.95961 loss)
I1022 11:27:04.149637 8564 sgd_solver.cpp:106] Iteration 19200, lr = 0.001
I1022 11:27:15.280393 8564 solver.cpp:236] Iteration 19300, loss = 0.756417
I1022 11:27:15.280501 8564 solver.cpp:252] Train net output #0: loss = 0.756417 (* 1 = 0.756417 loss)
I1022 11:27:15.280506 8564 sgd_solver.cpp:106] Iteration 19300, lr = 0.001
I1022 11:27:26.421196 8564 solver.cpp:236] Iteration 19400, loss = 0.713458
I1022 11:27:26.421247 8564 solver.cpp:252] Train net output #0: loss = 0.713458 (* 1 = 0.713458 loss)
I1022 11:27:26.421257 8564 sgd_solver.cpp:106] Iteration 19400, lr = 0.001
I1022 11:27:37.574345 8564 solver.cpp:236] Iteration 19500, loss = 0.87001
I1022 11:27:37.574383 8564 solver.cpp:252] Train net output #0: loss = 0.87001 (* 1 = 0.87001 loss)
I1022 11:27:37.574388 8564 sgd_solver.cpp:106] Iteration 19500, lr = 0.001
I1022 11:27:48.366206 8564 solver.cpp:236] Iteration 19600, loss = 0.779889
I1022 11:27:48.366286 8564 solver.cpp:252] Train net output #0: loss = 0.779889 (* 1 = 0.779889 loss)
I1022 11:27:48.366291 8564 sgd_solver.cpp:106] Iteration 19600, lr = 0.001
I1022 11:27:59.622241 8564 solver.cpp:236] Iteration 19700, loss = 0.958023
I1022 11:27:59.622279 8564 solver.cpp:252] Train net output #0: loss = 0.958023 (* 1 = 0.958023 loss)
I1022 11:27:59.622285 8564 sgd_solver.cpp:106] Iteration 19700, lr = 0.001
I1022 11:28:11.075382 8564 solver.cpp:236] Iteration 19800, loss = 0.753502
I1022 11:28:11.075420 8564 solver.cpp:252] Train net output #0: loss = 0.753502 (* 1 = 0.753502 loss)
I1022 11:28:11.075426 8564 sgd_solver.cpp:106] Iteration 19800, lr = 0.001
I1022 11:28:22.427258 8564 solver.cpp:236] Iteration 19900, loss = 0.711883
I1022 11:28:22.427316 8564 solver.cpp:252] Train net output #0: loss = 0.711883 (* 1 = 0.711883 loss)
I1022 11:28:22.427323 8564 sgd_solver.cpp:106] Iteration 19900, lr = 0.001
I1022 11:28:33.530458 8564 solver.cpp:340] Iteration 20000, Testing net (#0)
I1022 11:28:38.890738 8564 solver.cpp:408] Test net output #0: accuracy = 0.5903
I1022 11:28:38.890771 8564 solver.cpp:408] Test net output #1: loss = 1.18231 (* 1 = 1.18231 loss)
I1022 11:28:38.950404 8564 solver.cpp:236] Iteration 20000, loss = 0.865208
I1022 11:28:38.950429 8564 solver.cpp:252] Train net output #0: loss = 0.865208 (* 1 = 0.865208 loss)
I1022 11:28:38.950435 8564 sgd_solver.cpp:106] Iteration 20000, lr = 0.001
I1022 11:28:50.120688 8564 solver.cpp:236] Iteration 20100, loss = 0.776528
I1022 11:28:50.120715 8564 solver.cpp:252] Train net output #0: loss = 0.776528 (* 1 = 0.776528 loss)
I1022 11:28:50.120721 8564 sgd_solver.cpp:106] Iteration 20100, lr = 0.001
I1022 11:29:01.329221 8564 solver.cpp:236] Iteration 20200, loss = 0.955798
I1022 11:29:01.329303 8564 solver.cpp:252] Train net output #0: loss = 0.955798 (* 1 = 0.955798 loss)
I1022 11:29:01.329310 8564 sgd_solver.cpp:106] Iteration 20200, lr = 0.001
I1022 11:29:12.538288 8564 solver.cpp:236] Iteration 20300, loss = 0.751241
I1022 11:29:12.538323 8564 solver.cpp:252] Train net output #0: loss = 0.751241 (* 1 = 0.751241 loss)
I1022 11:29:12.538331 8564 sgd_solver.cpp:106] Iteration 20300, lr = 0.001
I1022 11:29:23.880916 8564 solver.cpp:236] Iteration 20400, loss = 0.710354
I1022 11:29:23.880952 8564 solver.cpp:252] Train net output #0: loss = 0.710354 (* 1 = 0.710354 loss)
I1022 11:29:23.880959 8564 sgd_solver.cpp:106] Iteration 20400, lr = 0.001
I1022 11:29:34.894707 8564 solver.cpp:236] Iteration 20500, loss = 0.860603
I1022 11:29:34.894789 8564 solver.cpp:252] Train net output #0: loss = 0.860603 (* 1 = 0.860603 loss)
I1022 11:29:34.894795 8564 sgd_solver.cpp:106] Iteration 20500, lr = 0.001
I1022 11:29:46.037618 8564 solver.cpp:236] Iteration 20600, loss = 0.773506
I1022 11:29:46.037654 8564 solver.cpp:252] Train net output #0: loss = 0.773506 (* 1 = 0.773506 loss)
I1022 11:29:46.037660 8564 sgd_solver.cpp:106] Iteration 20600, lr = 0.001
I1022 11:29:57.307436 8564 solver.cpp:236] Iteration 20700, loss = 0.953711
I1022 11:29:57.307464 8564 solver.cpp:252] Train net output #0: loss = 0.953711 (* 1 = 0.953711 loss)
I1022 11:29:57.307471 8564 sgd_solver.cpp:106] Iteration 20700, lr = 0.001
I1022 11:30:08.457167 8564 solver.cpp:236] Iteration 20800, loss = 0.748636
I1022 11:30:08.457299 8564 solver.cpp:252] Train net output #0: loss = 0.748636 (* 1 = 0.748636 loss)
I1022 11:30:08.457305 8564 sgd_solver.cpp:106] Iteration 20800, lr = 0.001
I1022 11:30:19.559459 8564 solver.cpp:236] Iteration 20900, loss = 0.708837
I1022 11:30:19.559486 8564 solver.cpp:252] Train net output #0: loss = 0.708837 (* 1 = 0.708837 loss)
I1022 11:30:19.559491 8564 sgd_solver.cpp:106] Iteration 20900, lr = 0.001
I1022 11:30:30.583124 8564 solver.cpp:340] Iteration 21000, Testing net (#0)
I1022 11:30:35.926051 8564 solver.cpp:408] Test net output #0: accuracy = 0.5928
I1022 11:30:35.926100 8564 solver.cpp:408] Test net output #1: loss = 1.17083 (* 1 = 1.17083 loss)
I1022 11:30:35.986152 8564 solver.cpp:236] Iteration 21000, loss = 0.855822
I1022 11:30:35.986181 8564 solver.cpp:252] Train net output #0: loss = 0.855822 (* 1 = 0.855822 loss)
I1022 11:30:35.986186 8564 sgd_solver.cpp:106] Iteration 21000, lr = 0.001
I1022 11:30:47.237026 8564 solver.cpp:236] Iteration 21100, loss = 0.770431
I1022 11:30:47.237118 8564 solver.cpp:252] Train net output #0: loss = 0.770431 (* 1 = 0.770431 loss)
I1022 11:30:47.237123 8564 sgd_solver.cpp:106] Iteration 21100, lr = 0.001
I1022 11:30:58.322841 8564 solver.cpp:236] Iteration 21200, loss = 0.951486
I1022 11:30:58.322876 8564 solver.cpp:252] Train net output #0: loss = 0.951486 (* 1 = 0.951486 loss)
I1022 11:30:58.322882 8564 sgd_solver.cpp:106] Iteration 21200, lr = 0.001
I1022 11:31:09.406991 8564 solver.cpp:236] Iteration 21300, loss = 0.74614
I1022 11:31:09.407028 8564 solver.cpp:252] Train net output #0: loss = 0.74614 (* 1 = 0.74614 loss)
I1022 11:31:09.407034 8564 sgd_solver.cpp:106] Iteration 21300, lr = 0.001
I1022 11:31:20.486799 8564 solver.cpp:236] Iteration 21400, loss = 0.707738
I1022 11:31:20.486889 8564 solver.cpp:252] Train net output #0: loss = 0.707738 (* 1 = 0.707738 loss)
I1022 11:31:20.486894 8564 sgd_solver.cpp:106] Iteration 21400, lr = 0.001
I1022 11:31:31.223381 8564 solver.cpp:236] Iteration 21500, loss = 0.851042
I1022 11:31:31.223417 8564 solver.cpp:252] Train net output #0: loss = 0.851042 (* 1 = 0.851042 loss)
I1022 11:31:31.223424 8564 sgd_solver.cpp:106] Iteration 21500, lr = 0.001
I1022 11:31:42.617928 8564 solver.cpp:236] Iteration 21600, loss = 0.767019
I1022 11:31:42.617962 8564 solver.cpp:252] Train net output #0: loss = 0.767019 (* 1 = 0.767019 loss)
I1022 11:31:42.617967 8564 sgd_solver.cpp:106] Iteration 21600, lr = 0.001
I1022 11:31:53.699035 8564 solver.cpp:236] Iteration 21700, loss = 0.949351
I1022 11:31:53.699142 8564 solver.cpp:252] Train net output #0: loss = 0.949351 (* 1 = 0.949351 loss)
I1022 11:31:53.699149 8564 sgd_solver.cpp:106] Iteration 21700, lr = 0.001
I1022 11:32:04.918752 8564 solver.cpp:236] Iteration 21800, loss = 0.743844
I1022 11:32:04.918788 8564 solver.cpp:252] Train net output #0: loss = 0.743844 (* 1 = 0.743844 loss)
I1022 11:32:04.918794 8564 sgd_solver.cpp:106] Iteration 21800, lr = 0.001
I1022 11:32:16.003837 8564 solver.cpp:236] Iteration 21900, loss = 0.706585
I1022 11:32:16.003865 8564 solver.cpp:252] Train net output #0: loss = 0.706585 (* 1 = 0.706585 loss)
I1022 11:32:16.003871 8564 sgd_solver.cpp:106] Iteration 21900, lr = 0.001
I1022 11:32:27.019603 8564 solver.cpp:340] Iteration 22000, Testing net (#0)
I1022 11:32:32.357235 8564 solver.cpp:408] Test net output #0: accuracy = 0.5964
I1022 11:32:32.357269 8564 solver.cpp:408] Test net output #1: loss = 1.15447 (* 1 = 1.15447 loss)
I1022 11:32:32.418025 8564 solver.cpp:236] Iteration 22000, loss = 0.846732
I1022 11:32:32.418059 8564 solver.cpp:252] Train net output #0: loss = 0.846732 (* 1 = 0.846732 loss)
I1022 11:32:32.418066 8564 sgd_solver.cpp:106] Iteration 22000, lr = 0.001
I1022 11:32:43.519179 8564 solver.cpp:236] Iteration 22100, loss = 0.764138
I1022 11:32:43.519207 8564 solver.cpp:252] Train net output #0: loss = 0.764138 (* 1 = 0.764138 loss)
I1022 11:32:43.519213 8564 sgd_solver.cpp:106] Iteration 22100, lr = 0.001
I1022 11:32:54.859508 8564 solver.cpp:236] Iteration 22200, loss = 0.947374
I1022 11:32:54.859544 8564 solver.cpp:252] Train net output #0: loss = 0.947374 (* 1 = 0.947374 loss)
I1022 11:32:54.859549 8564 sgd_solver.cpp:106] Iteration 22200, lr = 0.001
I1022 11:33:05.993993 8564 solver.cpp:236] Iteration 22300, loss = 0.741793
I1022 11:33:05.994052 8564 solver.cpp:252] Train net output #0: loss = 0.741793 (* 1 = 0.741793 loss)
I1022 11:33:05.994057 8564 sgd_solver.cpp:106] Iteration 22300, lr = 0.001
I1022 11:33:16.766129 8564 solver.cpp:236] Iteration 22400, loss = 0.705549
I1022 11:33:16.766155 8564 solver.cpp:252] Train net output #0: loss = 0.705549 (* 1 = 0.705549 loss)
I1022 11:33:16.766161 8564 sgd_solver.cpp:106] Iteration 22400, lr = 0.001
I1022 11:33:27.659243 8564 solver.cpp:236] Iteration 22500, loss = 0.842922
I1022 11:33:27.659278 8564 solver.cpp:252] Train net output #0: loss = 0.842922 (* 1 = 0.842922 loss)
I1022 11:33:27.659284 8564 sgd_solver.cpp:106] Iteration 22500, lr = 0.001
I1022 11:33:38.746275 8564 solver.cpp:236] Iteration 22600, loss = 0.761138
I1022 11:33:38.746358 8564 solver.cpp:252] Train net output #0: loss = 0.761138 (* 1 = 0.761138 loss)
I1022 11:33:38.746364 8564 sgd_solver.cpp:106] Iteration 22600, lr = 0.001
I1022 11:33:49.833434 8564 solver.cpp:236] Iteration 22700, loss = 0.945344
I1022 11:33:49.833472 8564 solver.cpp:252] Train net output #0: loss = 0.945344 (* 1 = 0.945344 loss)
I1022 11:33:49.833477 8564 sgd_solver.cpp:106] Iteration 22700, lr = 0.001
I1022 11:34:00.923348 8564 solver.cpp:236] Iteration 22800, loss = 0.739821
I1022 11:34:00.923383 8564 solver.cpp:252] Train net output #0: loss = 0.739821 (* 1 = 0.739821 loss)
I1022 11:34:00.923388 8564 sgd_solver.cpp:106] Iteration 22800, lr = 0.001
I1022 11:34:12.000700 8564 solver.cpp:236] Iteration 22900, loss = 0.704914
I1022 11:34:12.000787 8564 solver.cpp:252] Train net output #0: loss = 0.704914 (* 1 = 0.704914 loss)
I1022 11:34:12.000793 8564 sgd_solver.cpp:106] Iteration 22900, lr = 0.001
I1022 11:34:23.007793 8564 solver.cpp:340] Iteration 23000, Testing net (#0)
I1022 11:34:28.350219 8564 solver.cpp:408] Test net output #0: accuracy = 0.5997
I1022 11:34:28.350253 8564 solver.cpp:408] Test net output #1: loss = 1.14273 (* 1 = 1.14273 loss)
I1022 11:34:28.411208 8564 solver.cpp:236] Iteration 23000, loss = 0.838659
I1022 11:34:28.411240 8564 solver.cpp:252] Train net output #0: loss = 0.838659 (* 1 = 0.838659 loss)
I1022 11:34:28.411245 8564 sgd_solver.cpp:106] Iteration 23000, lr = 0.001
I1022 11:34:39.491907 8564 solver.cpp:236] Iteration 23100, loss = 0.757982
I1022 11:34:39.491943 8564 solver.cpp:252] Train net output #0: loss = 0.757982 (* 1 = 0.757982 loss)
I1022 11:34:39.491950 8564 sgd_solver.cpp:106] Iteration 23100, lr = 0.001
I1022 11:34:50.578348 8564 solver.cpp:236] Iteration 23200, loss = 0.943172
I1022 11:34:50.578438 8564 solver.cpp:252] Train net output #0: loss = 0.943172 (* 1 = 0.943172 loss)
I1022 11:34:50.578444 8564 sgd_solver.cpp:106] Iteration 23200, lr = 0.001
I1022 11:35:01.657322 8564 solver.cpp:236] Iteration 23300, loss = 0.737682
I1022 11:35:01.657358 8564 solver.cpp:252] Train net output #0: loss = 0.737682 (* 1 = 0.737682 loss)
I1022 11:35:01.657362 8564 sgd_solver.cpp:106] Iteration 23300, lr = 0.001
I1022 11:35:12.263994 8564 solver.cpp:236] Iteration 23400, loss = 0.704163
I1022 11:35:12.264029 8564 solver.cpp:252] Train net output #0: loss = 0.704163 (* 1 = 0.704163 loss)
I1022 11:35:12.264035 8564 sgd_solver.cpp:106] Iteration 23400, lr = 0.001
I1022 11:35:23.451151 8564 solver.cpp:236] Iteration 23500, loss = 0.834991
I1022 11:35:23.451241 8564 solver.cpp:252] Train net output #0: loss = 0.834991 (* 1 = 0.834991 loss)
I1022 11:35:23.451246 8564 sgd_solver.cpp:106] Iteration 23500, lr = 0.001
I1022 11:35:34.569589 8564 solver.cpp:236] Iteration 23600, loss = 0.755007
I1022 11:35:34.569615 8564 solver.cpp:252] Train net output #0: loss = 0.755007 (* 1 = 0.755007 loss)
I1022 11:35:34.569622 8564 sgd_solver.cpp:106] Iteration 23600, lr = 0.001
I1022 11:35:45.756487 8564 solver.cpp:236] Iteration 23700, loss = 0.940553
I1022 11:35:45.756525 8564 solver.cpp:252] Train net output #0: loss = 0.940553 (* 1 = 0.940553 loss)
I1022 11:35:45.756531 8564 sgd_solver.cpp:106] Iteration 23700, lr = 0.001
I1022 11:35:56.835918 8564 solver.cpp:236] Iteration 23800, loss = 0.736149
I1022 11:35:56.836004 8564 solver.cpp:252] Train net output #0: loss = 0.736149 (* 1 = 0.736149 loss)
I1022 11:35:56.836009 8564 sgd_solver.cpp:106] Iteration 23800, lr = 0.001
I1022 11:36:07.916920 8564 solver.cpp:236] Iteration 23900, loss = 0.703207
I1022 11:36:07.916954 8564 solver.cpp:252] Train net output #0: loss = 0.703207 (* 1 = 0.703207 loss)
I1022 11:36:07.916959 8564 sgd_solver.cpp:106] Iteration 23900, lr = 0.001
I1022 11:36:18.925489 8564 solver.cpp:340] Iteration 24000, Testing net (#0)
I1022 11:36:24.265583 8564 solver.cpp:408] Test net output #0: accuracy = 0.6033
I1022 11:36:24.265617 8564 solver.cpp:408] Test net output #1: loss = 1.13033 (* 1 = 1.13033 loss)
I1022 11:36:24.326654 8564 solver.cpp:236] Iteration 24000, loss = 0.831267
I1022 11:36:24.326686 8564 solver.cpp:252] Train net output #0: loss = 0.831267 (* 1 = 0.831267 loss)
I1022 11:36:24.326691 8564 sgd_solver.cpp:106] Iteration 24000, lr = 0.001
I1022 11:36:35.457151 8564 solver.cpp:236] Iteration 24100, loss = 0.752061
I1022 11:36:35.457238 8564 solver.cpp:252] Train net output #0: loss = 0.752061 (* 1 = 0.752061 loss)
I1022 11:36:35.457244 8564 sgd_solver.cpp:106] Iteration 24100, lr = 0.001
I1022 11:36:46.583427 8564 solver.cpp:236] Iteration 24200, loss = 0.938287
I1022 11:36:46.583462 8564 solver.cpp:252] Train net output #0: loss = 0.938287 (* 1 = 0.938287 loss)
I1022 11:36:46.583468 8564 sgd_solver.cpp:106] Iteration 24200, lr = 0.001
I1022 11:36:57.236263 8564 solver.cpp:236] Iteration 24300, loss = 0.734244
I1022 11:36:57.236289 8564 solver.cpp:252] Train net output #0: loss = 0.734244 (* 1 = 0.734244 loss)
I1022 11:36:57.236295 8564 sgd_solver.cpp:106] Iteration 24300, lr = 0.001
I1022 11:37:08.289324 8564 solver.cpp:236] Iteration 24400, loss = 0.70257
I1022 11:37:08.289400 8564 solver.cpp:252] Train net output #0: loss = 0.70257 (* 1 = 0.70257 loss)
I1022 11:37:08.289407 8564 sgd_solver.cpp:106] Iteration 24400, lr = 0.001
I1022 11:37:19.383482 8564 solver.cpp:236] Iteration 24500, loss = 0.827955
I1022 11:37:19.383508 8564 solver.cpp:252] Train net output #0: loss = 0.827955 (* 1 = 0.827955 loss)
I1022 11:37:19.383514 8564 sgd_solver.cpp:106] Iteration 24500, lr = 0.001
I1022 11:37:30.461596 8564 solver.cpp:236] Iteration 24600, loss = 0.748821
I1022 11:37:30.461632 8564 solver.cpp:252] Train net output #0: loss = 0.748821 (* 1 = 0.748821 loss)
I1022 11:37:30.461637 8564 sgd_solver.cpp:106] Iteration 24600, lr = 0.001
I1022 11:37:41.538147 8564 solver.cpp:236] Iteration 24700, loss = 0.93621
I1022 11:37:41.538231 8564 solver.cpp:252] Train net output #0: loss = 0.93621 (* 1 = 0.93621 loss)
I1022 11:37:41.538238 8564 sgd_solver.cpp:106] Iteration 24700, lr = 0.001
I1022 11:37:52.664242 8564 solver.cpp:236] Iteration 24800, loss = 0.732527
I1022 11:37:52.664278 8564 solver.cpp:252] Train net output #0: loss = 0.732527 (* 1 = 0.732527 loss)
I1022 11:37:52.664284 8564 sgd_solver.cpp:106] Iteration 24800, lr = 0.001
I1022 11:38:03.848577 8564 solver.cpp:236] Iteration 24900, loss = 0.701903
I1022 11:38:03.848613 8564 solver.cpp:252] Train net output #0: loss = 0.701903 (* 1 = 0.701903 loss)
I1022 11:38:03.848618 8564 sgd_solver.cpp:106] Iteration 24900, lr = 0.001
I1022 11:38:14.962733 8564 solver.cpp:340] Iteration 25000, Testing net (#0)
I1022 11:38:20.346894 8564 solver.cpp:408] Test net output #0: accuracy = 0.6041
I1022 11:38:20.346930 8564 solver.cpp:408] Test net output #1: loss = 1.12697 (* 1 = 1.12697 loss)
I1022 11:38:20.407196 8564 solver.cpp:236] Iteration 25000, loss = 0.825223
I1022 11:38:20.407218 8564 solver.cpp:252] Train net output #0: loss = 0.825223 (* 1 = 0.825223 loss)
I1022 11:38:20.407223 8564 sgd_solver.cpp:106] Iteration 25000, lr = 0.001
I1022 11:38:31.484741 8564 solver.cpp:236] Iteration 25100, loss = 0.746047
I1022 11:38:31.484776 8564 solver.cpp:252] Train net output #0: loss = 0.746047 (* 1 = 0.746047 loss)
I1022 11:38:31.484782 8564 sgd_solver.cpp:106] Iteration 25100, lr = 0.001
I1022 11:38:42.700664 8564 solver.cpp:236] Iteration 25200, loss = 0.933658
I1022 11:38:42.700695 8564 solver.cpp:252] Train net output #0: loss = 0.933658 (* 1 = 0.933658 loss)
I1022 11:38:42.700700 8564 sgd_solver.cpp:106] Iteration 25200, lr = 0.001
I1022 11:38:53.403936 8564 solver.cpp:236] Iteration 25300, loss = 0.730973
I1022 11:38:53.404041 8564 solver.cpp:252] Train net output #0: loss = 0.730973 (* 1 = 0.730973 loss)
I1022 11:38:53.404047 8564 sgd_solver.cpp:106] Iteration 25300, lr = 0.001
I1022 11:39:04.652273 8564 solver.cpp:236] Iteration 25400, loss = 0.701237
I1022 11:39:04.652310 8564 solver.cpp:252] Train net output #0: loss = 0.701237 (* 1 = 0.701237 loss)
I1022 11:39:04.652315 8564 sgd_solver.cpp:106] Iteration 25400, lr = 0.001
I1022 11:39:15.846827 8564 solver.cpp:236] Iteration 25500, loss = 0.822009
I1022 11:39:15.846861 8564 solver.cpp:252] Train net output #0: loss = 0.822009 (* 1 = 0.822009 loss)
I1022 11:39:15.846868 8564 sgd_solver.cpp:106] Iteration 25500, lr = 0.001
I1022 11:39:26.928272 8564 solver.cpp:236] Iteration 25600, loss = 0.743156
I1022 11:39:26.928349 8564 solver.cpp:252] Train net output #0: loss = 0.743156 (* 1 = 0.743156 loss)
I1022 11:39:26.928356 8564 sgd_solver.cpp:106] Iteration 25600, lr = 0.001
I1022 11:39:38.012732 8564 solver.cpp:236] Iteration 25700, loss = 0.931252
I1022 11:39:38.012768 8564 solver.cpp:252] Train net output #0: loss = 0.931252 (* 1 = 0.931252 loss)
I1022 11:39:38.012773 8564 sgd_solver.cpp:106] Iteration 25700, lr = 0.001
I1022 11:39:49.334939 8564 solver.cpp:236] Iteration 25800, loss = 0.729736
I1022 11:39:49.334977 8564 solver.cpp:252] Train net output #0: loss = 0.729736 (* 1 = 0.729736 loss)
I1022 11:39:49.334983 8564 sgd_solver.cpp:106] Iteration 25800, lr = 0.001
I1022 11:40:00.471437 8564 solver.cpp:236] Iteration 25900, loss = 0.700592
I1022 11:40:00.471545 8564 solver.cpp:252] Train net output #0: loss = 0.700592 (* 1 = 0.700592 loss)
I1022 11:40:00.471560 8564 sgd_solver.cpp:106] Iteration 25900, lr = 0.001
I1022 11:40:11.500015 8564 solver.cpp:340] Iteration 26000, Testing net (#0)
I1022 11:40:16.842622 8564 solver.cpp:408] Test net output #0: accuracy = 0.6068
I1022 11:40:16.842646 8564 solver.cpp:408] Test net output #1: loss = 1.12279 (* 1 = 1.12279 loss)
I1022 11:40:16.903074 8564 solver.cpp:236] Iteration 26000, loss = 0.819177
I1022 11:40:16.903106 8564 solver.cpp:252] Train net output #0: loss = 0.819177 (* 1 = 0.819177 loss)
I1022 11:40:16.903111 8564 sgd_solver.cpp:106] Iteration 26000, lr = 0.001
I1022 11:40:27.986927 8564 solver.cpp:236] Iteration 26100, loss = 0.740474
I1022 11:40:27.986963 8564 solver.cpp:252] Train net output #0: loss = 0.740474 (* 1 = 0.740474 loss)
I1022 11:40:27.986968 8564 sgd_solver.cpp:106] Iteration 26100, lr = 0.001
I1022 11:40:38.552723 8564 solver.cpp:236] Iteration 26200, loss = 0.928696
I1022 11:40:38.552841 8564 solver.cpp:252] Train net output #0: loss = 0.928696 (* 1 = 0.928696 loss)
I1022 11:40:38.552846 8564 sgd_solver.cpp:106] Iteration 26200, lr = 0.001
I1022 11:40:49.637251 8564 solver.cpp:236] Iteration 26300, loss = 0.728425
I1022 11:40:49.637286 8564 solver.cpp:252] Train net output #0: loss = 0.728425 (* 1 = 0.728425 loss)
I1022 11:40:49.637292 8564 sgd_solver.cpp:106] Iteration 26300, lr = 0.001
I1022 11:41:00.718488 8564 solver.cpp:236] Iteration 26400, loss = 0.700227
I1022 11:41:00.718523 8564 solver.cpp:252] Train net output #0: loss = 0.700227 (* 1 = 0.700227 loss)
I1022 11:41:00.718529 8564 sgd_solver.cpp:106] Iteration 26400, lr = 0.001
I1022 11:41:11.802881 8564 solver.cpp:236] Iteration 26500, loss = 0.816485
I1022 11:41:11.802988 8564 solver.cpp:252] Train net output #0: loss = 0.816485 (* 1 = 0.816485 loss)
I1022 11:41:11.803002 8564 sgd_solver.cpp:106] Iteration 26500, lr = 0.001
I1022 11:41:22.868739 8564 solver.cpp:236] Iteration 26600, loss = 0.738439
I1022 11:41:22.868774 8564 solver.cpp:252] Train net output #0: loss = 0.738439 (* 1 = 0.738439 loss)
I1022 11:41:22.868780 8564 sgd_solver.cpp:106] Iteration 26600, lr = 0.001
I1022 11:41:33.944108 8564 solver.cpp:236] Iteration 26700, loss = 0.926261
I1022 11:41:33.944144 8564 solver.cpp:252] Train net output #0: loss = 0.926261 (* 1 = 0.926261 loss)
I1022 11:41:33.944150 8564 sgd_solver.cpp:106] Iteration 26700, lr = 0.001
I1022 11:41:45.022961 8564 solver.cpp:236] Iteration 26800, loss = 0.726731
I1022 11:41:45.023020 8564 solver.cpp:252] Train net output #0: loss = 0.726731 (* 1 = 0.726731 loss)
I1022 11:41:45.023026 8564 sgd_solver.cpp:106] Iteration 26800, lr = 0.001
I1022 11:41:56.095005 8564 solver.cpp:236] Iteration 26900, loss = 0.699937
I1022 11:41:56.095031 8564 solver.cpp:252] Train net output #0: loss = 0.699937 (* 1 = 0.699937 loss)
I1022 11:41:56.095037 8564 sgd_solver.cpp:106] Iteration 26900, lr = 0.001
I1022 11:42:07.107566 8564 solver.cpp:340] Iteration 27000, Testing net (#0)
I1022 11:42:12.445190 8564 solver.cpp:408] Test net output #0: accuracy = 0.6064
I1022 11:42:12.445225 8564 solver.cpp:408] Test net output #1: loss = 1.1241 (* 1 = 1.1241 loss)
I1022 11:42:12.509949 8564 solver.cpp:236] Iteration 27000, loss = 0.813889
I1022 11:42:12.509969 8564 solver.cpp:252] Train net output #0: loss = 0.813889 (* 1 = 0.813889 loss)
I1022 11:42:12.509975 8564 sgd_solver.cpp:106] Iteration 27000, lr = 0.001
I1022 11:42:23.462714 8564 solver.cpp:236] Iteration 27100, loss = 0.736272
I1022 11:42:23.462770 8564 solver.cpp:252] Train net output #0: loss = 0.736272 (* 1 = 0.736272 loss)
I1022 11:42:23.462776 8564 sgd_solver.cpp:106] Iteration 27100, lr = 0.001
I1022 11:42:34.140887 8564 solver.cpp:236] Iteration 27200, loss = 0.923642
I1022 11:42:34.140914 8564 solver.cpp:252] Train net output #0: loss = 0.923642 (* 1 = 0.923642 loss)
I1022 11:42:34.140919 8564 sgd_solver.cpp:106] Iteration 27200, lr = 0.001
I1022 11:42:45.218492 8564 solver.cpp:236] Iteration 27300, loss = 0.725376
I1022 11:42:45.218525 8564 solver.cpp:252] Train net output #0: loss = 0.725376 (* 1 = 0.725376 loss)
I1022 11:42:45.218530 8564 sgd_solver.cpp:106] Iteration 27300, lr = 0.001
I1022 11:42:56.288832 8564 solver.cpp:236] Iteration 27400, loss = 0.699479
I1022 11:42:56.288938 8564 solver.cpp:252] Train net output #0: loss = 0.699479 (* 1 = 0.699479 loss)
I1022 11:42:56.288952 8564 sgd_solver.cpp:106] Iteration 27400, lr = 0.001
I1022 11:43:07.361426 8564 solver.cpp:236] Iteration 27500, loss = 0.811632
I1022 11:43:07.361460 8564 solver.cpp:252] Train net output #0: loss = 0.811632 (* 1 = 0.811632 loss)
I1022 11:43:07.361464 8564 sgd_solver.cpp:106] Iteration 27500, lr = 0.001
I1022 11:43:18.437834 8564 solver.cpp:236] Iteration 27600, loss = 0.733876
I1022 11:43:18.437870 8564 solver.cpp:252] Train net output #0: loss = 0.733876 (* 1 = 0.733876 loss)
I1022 11:43:18.437875 8564 sgd_solver.cpp:106] Iteration 27600, lr = 0.001
I1022 11:43:29.509420 8564 solver.cpp:236] Iteration 27700, loss = 0.921256
I1022 11:43:29.509500 8564 solver.cpp:252] Train net output #0: loss = 0.921256 (* 1 = 0.921256 loss)
I1022 11:43:29.509506 8564 sgd_solver.cpp:106] Iteration 27700, lr = 0.001
I1022 11:43:40.585213 8564 solver.cpp:236] Iteration 27800, loss = 0.724267
I1022 11:43:40.585239 8564 solver.cpp:252] Train net output #0: loss = 0.724267 (* 1 = 0.724267 loss)
I1022 11:43:40.585245 8564 sgd_solver.cpp:106] Iteration 27800, lr = 0.001
I1022 11:43:51.663720 8564 solver.cpp:236] Iteration 27900, loss = 0.699296
I1022 11:43:51.663745 8564 solver.cpp:252] Train net output #0: loss = 0.699296 (* 1 = 0.699296 loss)
I1022 11:43:51.663750 8564 sgd_solver.cpp:106] Iteration 27900, lr = 0.001
I1022 11:44:02.674868 8564 solver.cpp:340] Iteration 28000, Testing net (#0)
I1022 11:44:08.014457 8564 solver.cpp:408] Test net output #0: accuracy = 0.607
I1022 11:44:08.014492 8564 solver.cpp:408] Test net output #1: loss = 1.12152 (* 1 = 1.12152 loss)
I1022 11:44:08.077186 8564 solver.cpp:236] Iteration 28000, loss = 0.809138
I1022 11:44:08.077211 8564 solver.cpp:252] Train net output #0: loss = 0.809138 (* 1 = 0.809138 loss)
I1022 11:44:08.077217 8564 sgd_solver.cpp:106] Iteration 28000, lr = 0.001
I1022 11:44:18.640517 8564 solver.cpp:236] Iteration 28100, loss = 0.731815
I1022 11:44:18.640543 8564 solver.cpp:252] Train net output #0: loss = 0.731815 (* 1 = 0.731815 loss)
I1022 11:44:18.640549 8564 sgd_solver.cpp:106] Iteration 28100, lr = 0.001
I1022 11:44:29.718418 8564 solver.cpp:236] Iteration 28200, loss = 0.919272
I1022 11:44:29.718454 8564 solver.cpp:252] Train net output #0: loss = 0.919272 (* 1 = 0.919272 loss)
I1022 11:44:29.718461 8564 sgd_solver.cpp:106] Iteration 28200, lr = 0.001
I1022 11:44:40.796396 8564 solver.cpp:236] Iteration 28300, loss = 0.723178
I1022 11:44:40.796490 8564 solver.cpp:252] Train net output #0: loss = 0.723178 (* 1 = 0.723178 loss)
I1022 11:44:40.796496 8564 sgd_solver.cpp:106] Iteration 28300, lr = 0.001
I1022 11:44:51.901971 8564 solver.cpp:236] Iteration 28400, loss = 0.698887
I1022 11:44:51.902007 8564 solver.cpp:252] Train net output #0: loss = 0.698887 (* 1 = 0.698887 loss)
I1022 11:44:51.902012 8564 sgd_solver.cpp:106] Iteration 28400, lr = 0.001
I1022 11:45:03.053115 8564 solver.cpp:236] Iteration 28500, loss = 0.807208
I1022 11:45:03.053153 8564 solver.cpp:252] Train net output #0: loss = 0.807208 (* 1 = 0.807208 loss)
I1022 11:45:03.053158 8564 sgd_solver.cpp:106] Iteration 28500, lr = 0.001
I1022 11:45:14.136247 8564 solver.cpp:236] Iteration 28600, loss = 0.729844
I1022 11:45:14.136340 8564 solver.cpp:252] Train net output #0: loss = 0.729844 (* 1 = 0.729844 loss)
I1022 11:45:14.136355 8564 sgd_solver.cpp:106] Iteration 28600, lr = 0.001
I1022 11:45:25.517149 8564 solver.cpp:236] Iteration 28700, loss = 0.917208
I1022 11:45:25.517184 8564 solver.cpp:252] Train net output #0: loss = 0.917208 (* 1 = 0.917208 loss)
I1022 11:45:25.517189 8564 sgd_solver.cpp:106] Iteration 28700, lr = 0.001
I1022 11:45:36.650207 8564 solver.cpp:236] Iteration 28800, loss = 0.722189
I1022 11:45:36.650243 8564 solver.cpp:252] Train net output #0: loss = 0.722189 (* 1 = 0.722189 loss)
I1022 11:45:36.650249 8564 sgd_solver.cpp:106] Iteration 28800, lr = 0.001
I1022 11:45:47.943670 8564 solver.cpp:236] Iteration 28900, loss = 0.698848
I1022 11:45:47.943789 8564 solver.cpp:252] Train net output #0: loss = 0.698848 (* 1 = 0.698848 loss)
I1022 11:45:47.943802 8564 sgd_solver.cpp:106] Iteration 28900, lr = 0.001
I1022 11:45:59.067703 8564 solver.cpp:340] Iteration 29000, Testing net (#0)
I1022 11:46:04.197167 8564 solver.cpp:408] Test net output #0: accuracy = 0.6066
I1022 11:46:04.197201 8564 solver.cpp:408] Test net output #1: loss = 1.12055 (* 1 = 1.12055 loss)
I1022 11:46:04.253844 8564 solver.cpp:236] Iteration 29000, loss = 0.805276
I1022 11:46:04.253864 8564 solver.cpp:252] Train net output #0: loss = 0.805276 (* 1 = 0.805276 loss)
I1022 11:46:04.253870 8564 sgd_solver.cpp:106] Iteration 29000, lr = 0.001
I1022 11:46:15.100793 8564 solver.cpp:236] Iteration 29100, loss = 0.727829
I1022 11:46:15.100821 8564 solver.cpp:252] Train net output #0: loss = 0.727829 (* 1 = 0.727829 loss)
I1022 11:46:15.100826 8564 sgd_solver.cpp:106] Iteration 29100, lr = 0.001
I1022 11:46:26.232477 8564 solver.cpp:236] Iteration 29200, loss = 0.914706
I1022 11:46:26.232568 8564 solver.cpp:252] Train net output #0: loss = 0.914706 (* 1 = 0.914706 loss)
I1022 11:46:26.232573 8564 sgd_solver.cpp:106] Iteration 29200, lr = 0.001
I1022 11:46:37.359396 8564 solver.cpp:236] Iteration 29300, loss = 0.721268
I1022 11:46:37.359431 8564 solver.cpp:252] Train net output #0: loss = 0.721268 (* 1 = 0.721268 loss)
I1022 11:46:37.359437 8564 sgd_solver.cpp:106] Iteration 29300, lr = 0.001
I1022 11:46:48.493046 8564 solver.cpp:236] Iteration 29400, loss = 0.698388
I1022 11:46:48.493074 8564 solver.cpp:252] Train net output #0: loss = 0.698388 (* 1 = 0.698388 loss)
I1022 11:46:48.493080 8564 sgd_solver.cpp:106] Iteration 29400, lr = 0.001
I1022 11:46:59.621747 8564 solver.cpp:236] Iteration 29500, loss = 0.803337
I1022 11:46:59.621840 8564 solver.cpp:252] Train net output #0: loss = 0.803337 (* 1 = 0.803337 loss)
I1022 11:46:59.621846 8564 sgd_solver.cpp:106] Iteration 29500, lr = 0.001
I1022 11:47:10.850951 8564 solver.cpp:236] Iteration 29600, loss = 0.725892
I1022 11:47:10.850978 8564 solver.cpp:252] Train net output #0: loss = 0.725892 (* 1 = 0.725892 loss)
I1022 11:47:10.850985 8564 sgd_solver.cpp:106] Iteration 29600, lr = 0.001
I1022 11:47:21.993129 8564 solver.cpp:236] Iteration 29700, loss = 0.912513
I1022 11:47:21.993155 8564 solver.cpp:252] Train net output #0: loss = 0.912513 (* 1 = 0.912513 loss)
I1022 11:47:21.993161 8564 sgd_solver.cpp:106] Iteration 29700, lr = 0.001
I1022 11:47:33.122323 8564 solver.cpp:236] Iteration 29800, loss = 0.72026
I1022 11:47:33.122427 8564 solver.cpp:252] Train net output #0: loss = 0.72026 (* 1 = 0.72026 loss)
I1022 11:47:33.122433 8564 sgd_solver.cpp:106] Iteration 29800, lr = 0.001
I1022 11:47:44.294265 8564 solver.cpp:236] Iteration 29900, loss = 0.698303
I1022 11:47:44.294301 8564 solver.cpp:252] Train net output #0: loss = 0.698303 (* 1 = 0.698303 loss)
I1022 11:47:44.294306 8564 sgd_solver.cpp:106] Iteration 29900, lr = 0.001
I1022 11:47:55.107378 8564 solver.cpp:340] Iteration 30000, Testing net (#0)
I1022 11:48:00.266275 8564 solver.cpp:408] Test net output #0: accuracy = 0.6067
I1022 11:48:00.266300 8564 solver.cpp:408] Test net output #1: loss = 1.11859 (* 1 = 1.11859 loss)
I1022 11:48:00.326344 8564 solver.cpp:236] Iteration 30000, loss = 0.801821
I1022 11:48:00.326373 8564 solver.cpp:252] Train net output #0: loss = 0.801821 (* 1 = 0.801821 loss)
I1022 11:48:00.326377 8564 sgd_solver.cpp:46] MultiStep Status: Iteration 30000, step = 1
I1022 11:48:00.326380 8564 sgd_solver.cpp:106] Iteration 30000, lr = 0.0001
I1022 11:48:11.492182 8564 solver.cpp:236] Iteration 30100, loss = 0.622436
I1022 11:48:11.492238 8564 solver.cpp:252] Train net output #0: loss = 0.622436 (* 1 = 0.622436 loss)
I1022 11:48:11.492244 8564 sgd_solver.cpp:106] Iteration 30100, lr = 0.0001
I1022 11:48:22.675348 8564 solver.cpp:236] Iteration 30200, loss = 0.835799
I1022 11:48:22.675382 8564 solver.cpp:252] Train net output #0: loss = 0.835799 (* 1 = 0.835799 loss)
I1022 11:48:22.675389 8564 sgd_solver.cpp:106] Iteration 30200, lr = 0.0001
I1022 11:48:33.874954 8564 solver.cpp:236] Iteration 30300, loss = 0.701128
I1022 11:48:33.874990 8564 solver.cpp:252] Train net output #0: loss = 0.701128 (* 1 = 0.701128 loss)
I1022 11:48:33.874995 8564 sgd_solver.cpp:106] Iteration 30300, lr = 0.0001
I1022 11:48:44.991941 8564 solver.cpp:236] Iteration 30400, loss = 0.640916
I1022 11:48:44.992039 8564 solver.cpp:252] Train net output #0: loss = 0.640916 (* 1 = 0.640916 loss)
I1022 11:48:44.992045 8564 sgd_solver.cpp:106] Iteration 30400, lr = 0.0001
I1022 11:48:56.155804 8564 solver.cpp:236] Iteration 30500, loss = 0.7662
I1022 11:48:56.155830 8564 solver.cpp:252] Train net output #0: loss = 0.7662 (* 1 = 0.7662 loss)
I1022 11:48:56.155835 8564 sgd_solver.cpp:106] Iteration 30500, lr = 0.0001
I1022 11:49:07.312994 8564 solver.cpp:236] Iteration 30600, loss = 0.613073
I1022 11:49:07.313030 8564 solver.cpp:252] Train net output #0: loss = 0.613073 (* 1 = 0.613073 loss)
I1022 11:49:07.313037 8564 sgd_solver.cpp:106] Iteration 30600, lr = 0.0001
I1022 11:49:18.473657 8564 solver.cpp:236] Iteration 30700, loss = 0.82724
I1022 11:49:18.473747 8564 solver.cpp:252] Train net output #0: loss = 0.82724 (* 1 = 0.82724 loss)
I1022 11:49:18.473753 8564 sgd_solver.cpp:106] Iteration 30700, lr = 0.0001
I1022 11:49:29.647655 8564 solver.cpp:236] Iteration 30800, loss = 0.697973
I1022 11:49:29.647689 8564 solver.cpp:252] Train net output #0: loss = 0.697973 (* 1 = 0.697973 loss)
I1022 11:49:29.647694 8564 sgd_solver.cpp:106] Iteration 30800, lr = 0.0001
I1022 11:49:40.797430 8564 solver.cpp:236] Iteration 30900, loss = 0.640463
I1022 11:49:40.797466 8564 solver.cpp:252] Train net output #0: loss = 0.640463 (* 1 = 0.640463 loss)
I1022 11:49:40.797472 8564 sgd_solver.cpp:106] Iteration 30900, lr = 0.0001
I1022 11:49:51.474588 8564 solver.cpp:340] Iteration 31000, Testing net (#0)
I1022 11:49:56.904474 8564 solver.cpp:408] Test net output #0: accuracy = 0.698
I1022 11:49:56.904500 8564 solver.cpp:408] Test net output #1: loss = 0.89011 (* 1 = 0.89011 loss)
I1022 11:49:56.965456 8564 solver.cpp:236] Iteration 31000, loss = 0.760433
I1022 11:49:56.965490 8564 solver.cpp:252] Train net output #0: loss = 0.760433 (* 1 = 0.760433 loss)
I1022 11:49:56.965497 8564 sgd_solver.cpp:106] Iteration 31000, lr = 0.0001
I1022 11:50:08.120568 8564 solver.cpp:236] Iteration 31100, loss = 0.610041
I1022 11:50:08.120604 8564 solver.cpp:252] Train net output #0: loss = 0.610041 (* 1 = 0.610041 loss)
I1022 11:50:08.120609 8564 sgd_solver.cpp:106] Iteration 31100, lr = 0.0001
I1022 11:50:19.286280 8564 solver.cpp:236] Iteration 31200, loss = 0.824354
I1022 11:50:19.286329 8564 solver.cpp:252] Train net output #0: loss = 0.824354 (* 1 = 0.824354 loss)
I1022 11:50:19.286336 8564 sgd_solver.cpp:106] Iteration 31200, lr = 0.0001
I1022 11:50:30.454028 8564 solver.cpp:236] Iteration 31300, loss = 0.695509
I1022 11:50:30.454115 8564 solver.cpp:252] Train net output #0: loss = 0.695509 (* 1 = 0.695509 loss)
I1022 11:50:30.454121 8564 sgd_solver.cpp:106] Iteration 31300, lr = 0.0001
I1022 11:50:41.572471 8564 solver.cpp:236] Iteration 31400, loss = 0.640334
I1022 11:50:41.572505 8564 solver.cpp:252] Train net output #0: loss = 0.640334 (* 1 = 0.640334 loss)
I1022 11:50:41.572510 8564 sgd_solver.cpp:106] Iteration 31400, lr = 0.0001
I1022 11:50:52.692456 8564 solver.cpp:236] Iteration 31500, loss = 0.755575
I1022 11:50:52.692490 8564 solver.cpp:252] Train net output #0: loss = 0.755575 (* 1 = 0.755575 loss)
I1022 11:50:52.692497 8564 sgd_solver.cpp:106] Iteration 31500, lr = 0.0001
I1022 11:51:03.806608 8564 solver.cpp:236] Iteration 31600, loss = 0.607259
I1022 11:51:03.806701 8564 solver.cpp:252] Train net output #0: loss = 0.607259 (* 1 = 0.607259 loss)
I1022 11:51:03.806707 8564 sgd_solver.cpp:106] Iteration 31600, lr = 0.0001
I1022 11:51:14.951393 8564 solver.cpp:236] Iteration 31700, loss = 0.822136
I1022 11:51:14.951429 8564 solver.cpp:252] Train net output #0: loss = 0.822136 (* 1 = 0.822136 loss)
I1022 11:51:14.951434 8564 sgd_solver.cpp:106] Iteration 31700, lr = 0.0001
I1022 11:51:26.088932 8564 solver.cpp:236] Iteration 31800, loss = 0.693714
I1022 11:51:26.088958 8564 solver.cpp:252] Train net output #0: loss = 0.693714 (* 1 = 0.693714 loss)
I1022 11:51:26.088963 8564 sgd_solver.cpp:106] Iteration 31800, lr = 0.0001
I1022 11:51:36.769866 8564 solver.cpp:236] Iteration 31900, loss = 0.640403
I1022 11:51:36.769934 8564 solver.cpp:252] Train net output #0: loss = 0.640403 (* 1 = 0.640403 loss)
I1022 11:51:36.769940 8564 sgd_solver.cpp:106] Iteration 31900, lr = 0.0001
I1022 11:51:47.784500 8564 solver.cpp:340] Iteration 32000, Testing net (#0)
I1022 11:51:53.157312 8564 solver.cpp:408] Test net output #0: accuracy = 0.6981
I1022 11:51:53.157338 8564 solver.cpp:408] Test net output #1: loss = 0.886714 (* 1 = 0.886714 loss)
I1022 11:51:53.217130 8564 solver.cpp:236] Iteration 32000, loss = 0.751232
I1022 11:51:53.217154 8564 solver.cpp:252] Train net output #0: loss = 0.751232 (* 1 = 0.751232 loss)
I1022 11:51:53.217157 8564 sgd_solver.cpp:106] Iteration 32000, lr = 0.0001
I1022 11:52:04.356755 8564 solver.cpp:236] Iteration 32100, loss = 0.604974
I1022 11:52:04.356791 8564 solver.cpp:252] Train net output #0: loss = 0.604974 (* 1 = 0.604974 loss)
I1022 11:52:04.356797 8564 sgd_solver.cpp:106] Iteration 32100, lr = 0.0001
I1022 11:52:15.493125 8564 solver.cpp:236] Iteration 32200, loss = 0.820321
I1022 11:52:15.493232 8564 solver.cpp:252] Train net output #0: loss = 0.820321 (* 1 = 0.820321 loss)
I1022 11:52:15.493248 8564 sgd_solver.cpp:106] Iteration 32200, lr = 0.0001
I1022 11:52:26.634297 8564 solver.cpp:236] Iteration 32300, loss = 0.692454
I1022 11:52:26.634323 8564 solver.cpp:252] Train net output #0: loss = 0.692454 (* 1 = 0.692454 loss)
I1022 11:52:26.634328 8564 sgd_solver.cpp:106] Iteration 32300, lr = 0.0001
I1022 11:52:37.799424 8564 solver.cpp:236] Iteration 32400, loss = 0.640601
I1022 11:52:37.799459 8564 solver.cpp:252] Train net output #0: loss = 0.640601 (* 1 = 0.640601 loss)
I1022 11:52:37.799464 8564 sgd_solver.cpp:106] Iteration 32400, lr = 0.0001
I1022 11:52:48.946633 8564 solver.cpp:236] Iteration 32500, loss = 0.747267
I1022 11:52:48.946722 8564 solver.cpp:252] Train net output #0: loss = 0.747267 (* 1 = 0.747267 loss)
I1022 11:52:48.946727 8564 sgd_solver.cpp:106] Iteration 32500, lr = 0.0001
I1022 11:53:00.080404 8564 solver.cpp:236] Iteration 32600, loss = 0.602863
I1022 11:53:00.080440 8564 solver.cpp:252] Train net output #0: loss = 0.602863 (* 1 = 0.602863 loss)
I1022 11:53:00.080446 8564 sgd_solver.cpp:106] Iteration 32600, lr = 0.0001
I1022 11:53:11.218451 8564 solver.cpp:236] Iteration 32700, loss = 0.818763
I1022 11:53:11.218485 8564 solver.cpp:252] Train net output #0: loss = 0.818763 (* 1 = 0.818763 loss)
I1022 11:53:11.218492 8564 sgd_solver.cpp:106] Iteration 32700, lr = 0.0001
I1022 11:53:22.386447 8564 solver.cpp:236] Iteration 32800, loss = 0.691561
I1022 11:53:22.386544 8564 solver.cpp:252] Train net output #0: loss = 0.691561 (* 1 = 0.691561 loss)
I1022 11:53:22.386559 8564 sgd_solver.cpp:106] Iteration 32800, lr = 0.0001
I1022 11:53:32.999651 8564 solver.cpp:236] Iteration 32900, loss = 0.640797
I1022 11:53:32.999677 8564 solver.cpp:252] Train net output #0: loss = 0.640797 (* 1 = 0.640797 loss)
I1022 11:53:32.999681 8564 sgd_solver.cpp:106] Iteration 32900, lr = 0.0001
I1022 11:53:44.102501 8564 solver.cpp:340] Iteration 33000, Testing net (#0)
I1022 11:53:49.633316 8564 solver.cpp:408] Test net output #0: accuracy = 0.7001
I1022 11:53:49.633379 8564 solver.cpp:408] Test net output #1: loss = 0.874897 (* 1 = 0.874897 loss)
I1022 11:53:49.697208 8564 solver.cpp:236] Iteration 33000, loss = 0.74367
I1022 11:53:49.697249 8564 solver.cpp:252] Train net output #0: loss = 0.74367 (* 1 = 0.74367 loss)
I1022 11:53:49.697266 8564 sgd_solver.cpp:106] Iteration 33000, lr = 0.0001
I1022 11:54:00.877634 8564 solver.cpp:236] Iteration 33100, loss = 0.600944
I1022 11:54:00.877754 8564 solver.cpp:252] Train net output #0: loss = 0.600944 (* 1 = 0.600944 loss)
I1022 11:54:00.877760 8564 sgd_solver.cpp:106] Iteration 33100, lr = 0.0001
I1022 11:54:12.002399 8564 solver.cpp:236] Iteration 33200, loss = 0.817458
I1022 11:54:12.002436 8564 solver.cpp:252] Train net output #0: loss = 0.817458 (* 1 = 0.817458 loss)
I1022 11:54:12.002442 8564 sgd_solver.cpp:106] Iteration 33200, lr = 0.0001
I1022 11:54:23.172279 8564 solver.cpp:236] Iteration 33300, loss = 0.690838
I1022 11:54:23.172317 8564 solver.cpp:252] Train net output #0: loss = 0.690838 (* 1 = 0.690838 loss)
I1022 11:54:23.172323 8564 sgd_solver.cpp:106] Iteration 33300, lr = 0.0001
I1022 11:54:34.295259 8564 solver.cpp:236] Iteration 33400, loss = 0.640957
I1022 11:54:34.295341 8564 solver.cpp:252] Train net output #0: loss = 0.640957 (* 1 = 0.640957 loss)
I1022 11:54:34.295356 8564 sgd_solver.cpp:106] Iteration 33400, lr = 0.0001
I1022 11:54:45.470350 8564 solver.cpp:236] Iteration 33500, loss = 0.740471
I1022 11:54:45.470386 8564 solver.cpp:252] Train net output #0: loss = 0.740471 (* 1 = 0.740471 loss)
I1022 11:54:45.470391 8564 sgd_solver.cpp:106] Iteration 33500, lr = 0.0001
I1022 11:54:56.598796 8564 solver.cpp:236] Iteration 33600, loss = 0.599203
I1022 11:54:56.598834 8564 solver.cpp:252] Train net output #0: loss = 0.599203 (* 1 = 0.599203 loss)
I1022 11:54:56.598839 8564 sgd_solver.cpp:106] Iteration 33600, lr = 0.0001
I1022 11:55:07.674783 8564 solver.cpp:236] Iteration 33700, loss = 0.816411
I1022 11:55:07.674871 8564 solver.cpp:252] Train net output #0: loss = 0.816411 (* 1 = 0.816411 loss)
I1022 11:55:07.674876 8564 sgd_solver.cpp:106] Iteration 33700, lr = 0.0001
I1022 11:55:18.246320 8564 solver.cpp:236] Iteration 33800, loss = 0.690261
I1022 11:55:18.246356 8564 solver.cpp:252] Train net output #0: loss = 0.690261 (* 1 = 0.690261 loss)
I1022 11:55:18.246361 8564 sgd_solver.cpp:106] Iteration 33800, lr = 0.0001
I1022 11:55:29.413061 8564 solver.cpp:236] Iteration 33900, loss = 0.641011
I1022 11:55:29.413099 8564 solver.cpp:252] Train net output #0: loss = 0.641011 (* 1 = 0.641011 loss)
I1022 11:55:29.413103 8564 sgd_solver.cpp:106] Iteration 33900, lr = 0.0001
I1022 11:55:40.484966 8564 solver.cpp:340] Iteration 34000, Testing net (#0)
I1022 11:55:45.826992 8564 solver.cpp:408] Test net output #0: accuracy = 0.7038
I1022 11:55:45.827024 8564 solver.cpp:408] Test net output #1: loss = 0.864182 (* 1 = 0.864182 loss)
I1022 11:55:45.888005 8564 solver.cpp:236] Iteration 34000, loss = 0.737492
I1022 11:55:45.888039 8564 solver.cpp:252] Train net output #0: loss = 0.737492 (* 1 = 0.737492 loss)
I1022 11:55:45.888046 8564 sgd_solver.cpp:106] Iteration 34000, lr = 0.0001
I1022 11:55:56.987709 8564 solver.cpp:236] Iteration 34100, loss = 0.597436
I1022 11:55:56.987745 8564 solver.cpp:252] Train net output #0: loss = 0.597436 (* 1 = 0.597436 loss)
I1022 11:55:56.987750 8564 sgd_solver.cpp:106] Iteration 34100, lr = 0.0001
I1022 11:56:08.133744 8564 solver.cpp:236] Iteration 34200, loss = 0.815482
I1022 11:56:08.133779 8564 solver.cpp:252] Train net output #0: loss = 0.815482 (* 1 = 0.815482 loss)
I1022 11:56:08.133785 8564 sgd_solver.cpp:106] Iteration 34200, lr = 0.0001
I1022 11:56:19.492722 8564 solver.cpp:236] Iteration 34300, loss = 0.689794
I1022 11:56:19.492826 8564 solver.cpp:252] Train net output #0: loss = 0.689794 (* 1 = 0.689794 loss)
I1022 11:56:19.492838 8564 sgd_solver.cpp:106] Iteration 34300, lr = 0.0001
I1022 11:56:31.645283 8564 solver.cpp:236] Iteration 34400, loss = 0.641044
I1022 11:56:31.645311 8564 solver.cpp:252] Train net output #0: loss = 0.641044 (* 1 = 0.641044 loss)
I1022 11:56:31.645318 8564 sgd_solver.cpp:106] Iteration 34400, lr = 0.0001
I1022 11:56:42.801470 8564 solver.cpp:236] Iteration 34500, loss = 0.734743
I1022 11:56:42.801506 8564 solver.cpp:252] Train net output #0: loss = 0.734743 (* 1 = 0.734743 loss)
I1022 11:56:42.801512 8564 sgd_solver.cpp:106] Iteration 34500, lr = 0.0001
I1022 11:56:53.931752 8564 solver.cpp:236] Iteration 34600, loss = 0.595749
I1022 11:56:53.931860 8564 solver.cpp:252] Train net output #0: loss = 0.595749 (* 1 = 0.595749 loss)
I1022 11:56:53.931866 8564 sgd_solver.cpp:106] Iteration 34600, lr = 0.0001
I1022 11:57:05.137717 8564 solver.cpp:236] Iteration 34700, loss = 0.814748
I1022 11:57:05.137779 8564 solver.cpp:252] Train net output #0: loss = 0.814748 (* 1 = 0.814748 loss)
I1022 11:57:05.137792 8564 sgd_solver.cpp:106] Iteration 34700, lr = 0.0001
I1022 11:57:16.654422 8564 solver.cpp:236] Iteration 34800, loss = 0.689472
I1022 11:57:16.654458 8564 solver.cpp:252] Train net output #0: loss = 0.689472 (* 1 = 0.689472 loss)
I1022 11:57:16.654464 8564 sgd_solver.cpp:106] Iteration 34800, lr = 0.0001
I1022 11:57:27.834247 8564 solver.cpp:236] Iteration 34900, loss = 0.641037
I1022 11:57:27.834344 8564 solver.cpp:252] Train net output #0: loss = 0.641037 (* 1 = 0.641037 loss)
I1022 11:57:27.834359 8564 sgd_solver.cpp:106] Iteration 34900, lr = 0.0001
I1022 11:57:41.276960 8564 solver.cpp:340] Iteration 35000, Testing net (#0)
I1022 11:57:49.033498 8564 solver.cpp:408] Test net output #0: accuracy = 0.7061
I1022 11:57:49.033550 8564 solver.cpp:408] Test net output #1: loss = 0.855199 (* 1 = 0.855199 loss)
I1022 11:57:49.124814 8564 solver.cpp:236] Iteration 35000, loss = 0.732233
I1022 11:57:49.124850 8564 solver.cpp:252] Train net output #0: loss = 0.732233 (* 1 = 0.732233 loss)
I1022 11:57:49.124855 8564 sgd_solver.cpp:46] MultiStep Status: Iteration 35000, step = 2
I1022 11:57:49.124857 8564 sgd_solver.cpp:106] Iteration 35000, lr = 1e-05
I1022 11:58:03.152465 8564 solver.cpp:236] Iteration 35100, loss = 0.590509
I1022 11:58:03.152554 8564 solver.cpp:252] Train net output #0: loss = 0.590509 (* 1 = 0.590509 loss)
I1022 11:58:03.152567 8564 sgd_solver.cpp:106] Iteration 35100, lr = 1e-05
I1022 11:58:14.294396 8564 solver.cpp:236] Iteration 35200, loss = 0.812733
I1022 11:58:14.294447 8564 solver.cpp:252] Train net output #0: loss = 0.812733 (* 1 = 0.812733 loss)
I1022 11:58:14.294456 8564 sgd_solver.cpp:106] Iteration 35200, lr = 1e-05
I1022 11:58:28.432822 8564 solver.cpp:236] Iteration 35300, loss = 0.69186
I1022 11:58:28.432859 8564 solver.cpp:252] Train net output #0: loss = 0.69186 (* 1 = 0.69186 loss)
I1022 11:58:28.432867 8564 sgd_solver.cpp:106] Iteration 35300, lr = 1e-05
I1022 11:58:44.333853 8564 solver.cpp:236] Iteration 35400, loss = 0.623849
I1022 11:58:44.333941 8564 solver.cpp:252] Train net output #0: loss = 0.623849 (* 1 = 0.623849 loss)
I1022 11:58:44.333947 8564 sgd_solver.cpp:106] Iteration 35400, lr = 1e-05
I1022 11:59:00.238854 8564 solver.cpp:236] Iteration 35500, loss = 0.722968
I1022 11:59:00.238890 8564 solver.cpp:252] Train net output #0: loss = 0.722968 (* 1 = 0.722968 loss)
I1022 11:59:00.238895 8564 sgd_solver.cpp:106] Iteration 35500, lr = 1e-05
I1022 11:59:14.286098 8564 solver.cpp:236] Iteration 35600, loss = 0.581258
I1022 11:59:14.286133 8564 solver.cpp:252] Train net output #0: loss = 0.581258 (* 1 = 0.581258 loss)
I1022 11:59:14.286137 8564 sgd_solver.cpp:106] Iteration 35600, lr = 1e-05
I1022 11:59:24.880439 8564 solver.cpp:236] Iteration 35700, loss = 0.810592
I1022 11:59:24.880535 8564 solver.cpp:252] Train net output #0: loss = 0.810592 (* 1 = 0.810592 loss)
I1022 11:59:24.880550 8564 sgd_solver.cpp:106] Iteration 35700, lr = 1e-05
I1022 11:59:36.006855 8564 solver.cpp:236] Iteration 35800, loss = 0.689966
I1022 11:59:36.006881 8564 solver.cpp:252] Train net output #0: loss = 0.689966 (* 1 = 0.689966 loss)
I1022 11:59:36.006886 8564 sgd_solver.cpp:106] Iteration 35800, lr = 1e-05
I1022 11:59:47.159737 8564 solver.cpp:236] Iteration 35900, loss = 0.624944
I1022 11:59:47.159782 8564 solver.cpp:252] Train net output #0: loss = 0.624944 (* 1 = 0.624944 loss)
I1022 11:59:47.159788 8564 sgd_solver.cpp:106] Iteration 35900, lr = 1e-05
I1022 12:00:02.516868 8564 solver.cpp:340] Iteration 36000, Testing net (#0)
I1022 12:00:10.301590 8564 solver.cpp:408] Test net output #0: accuracy = 0.7322
I1022 12:00:10.301635 8564 solver.cpp:408] Test net output #1: loss = 0.787552 (* 1 = 0.787552 loss)
I1022 12:00:10.400202 8564 solver.cpp:236] Iteration 36000, loss = 0.723015
I1022 12:00:10.400240 8564 solver.cpp:252] Train net output #0: loss = 0.723015 (* 1 = 0.723015 loss)
I1022 12:00:10.400246 8564 sgd_solver.cpp:106] Iteration 36000, lr = 1e-05
I1022 12:00:26.318359 8564 solver.cpp:236] Iteration 36100, loss = 0.579465
I1022 12:00:26.318409 8564 solver.cpp:252] Train net output #0: loss = 0.579465 (* 1 = 0.579465 loss)
I1022 12:00:26.318419 8564 sgd_solver.cpp:106] Iteration 36100, lr = 1e-05
I1022 12:00:38.633611 8564 solver.cpp:236] Iteration 36200, loss = 0.809783
I1022 12:00:38.633683 8564 solver.cpp:252] Train net output #0: loss = 0.809783 (* 1 = 0.809783 loss)
I1022 12:00:38.633689 8564 sgd_solver.cpp:106] Iteration 36200, lr = 1e-05
I1022 12:00:49.836977 8564 solver.cpp:236] Iteration 36300, loss = 0.689634
I1022 12:00:49.837014 8564 solver.cpp:252] Train net output #0: loss = 0.689634 (* 1 = 0.689634 loss)
I1022 12:00:49.837020 8564 sgd_solver.cpp:106] Iteration 36300, lr = 1e-05
I1022 12:01:01.212606 8564 solver.cpp:236] Iteration 36400, loss = 0.625275
I1022 12:01:01.212643 8564 solver.cpp:252] Train net output #0: loss = 0.625275 (* 1 = 0.625275 loss)
I1022 12:01:01.212649 8564 sgd_solver.cpp:106] Iteration 36400, lr = 1e-05
I1022 12:01:13.876613 8564 solver.cpp:236] Iteration 36500, loss = 0.722823
I1022 12:01:13.876703 8564 solver.cpp:252] Train net output #0: loss = 0.722823 (* 1 = 0.722823 loss)
I1022 12:01:13.876708 8564 sgd_solver.cpp:106] Iteration 36500, lr = 1e-05
I1022 12:01:24.787740 8564 solver.cpp:236] Iteration 36600, loss = 0.57876
I1022 12:01:24.787770 8564 solver.cpp:252] Train net output #0: loss = 0.57876 (* 1 = 0.57876 loss)
I1022 12:01:24.787775 8564 sgd_solver.cpp:106] Iteration 36600, lr = 1e-05
I1022 12:01:35.699867 8564 solver.cpp:236] Iteration 36700, loss = 0.809348
I1022 12:01:35.699905 8564 solver.cpp:252] Train net output #0: loss = 0.809348 (* 1 = 0.809348 loss)
I1022 12:01:35.699910 8564 sgd_solver.cpp:106] Iteration 36700, lr = 1e-05
I1022 12:01:46.821734 8564 solver.cpp:236] Iteration 36800, loss = 0.689604
I1022 12:01:46.821812 8564 solver.cpp:252] Train net output #0: loss = 0.689604 (* 1 = 0.689604 loss)
I1022 12:01:46.821817 8564 sgd_solver.cpp:106] Iteration 36800, lr = 1e-05
I1022 12:01:57.975872 8564 solver.cpp:236] Iteration 36900, loss = 0.625462
I1022 12:01:57.975899 8564 solver.cpp:252] Train net output #0: loss = 0.625462 (* 1 = 0.625462 loss)
I1022 12:01:57.975905 8564 sgd_solver.cpp:106] Iteration 36900, lr = 1e-05
I1022 12:02:09.113597 8564 solver.cpp:340] Iteration 37000, Testing net (#0)
I1022 12:02:14.513370 8564 solver.cpp:408] Test net output #0: accuracy = 0.7335
I1022 12:02:14.513393 8564 solver.cpp:408] Test net output #1: loss = 0.784984 (* 1 = 0.784984 loss)
I1022 12:02:14.577836 8564 solver.cpp:236] Iteration 37000, loss = 0.72254
I1022 12:02:14.577860 8564 solver.cpp:252] Train net output #0: loss = 0.72254 (* 1 = 0.72254 loss)
I1022 12:02:14.577865 8564 sgd_solver.cpp:106] Iteration 37000, lr = 1e-05
I1022 12:02:25.719784 8564 solver.cpp:236] Iteration 37100, loss = 0.578365
I1022 12:02:25.719871 8564 solver.cpp:252] Train net output #0: loss = 0.578365 (* 1 = 0.578365 loss)
I1022 12:02:25.719877 8564 sgd_solver.cpp:106] Iteration 37100, lr = 1e-05
I1022 12:02:36.856029 8564 solver.cpp:236] Iteration 37200, loss = 0.809059
I1022 12:02:36.856065 8564 solver.cpp:252] Train net output #0: loss = 0.809059 (* 1 = 0.809059 loss)
I1022 12:02:36.856070 8564 sgd_solver.cpp:106] Iteration 37200, lr = 1e-05
I1022 12:02:47.976176 8564 solver.cpp:236] Iteration 37300, loss = 0.689619
I1022 12:02:47.976202 8564 solver.cpp:252] Train net output #0: loss = 0.689619 (* 1 = 0.689619 loss)
I1022 12:02:47.976207 8564 sgd_solver.cpp:106] Iteration 37300, lr = 1e-05
I1022 12:02:59.134668 8564 solver.cpp:236] Iteration 37400, loss = 0.625574
I1022 12:02:59.134768 8564 solver.cpp:252] Train net output #0: loss = 0.625574 (* 1 = 0.625574 loss)
I1022 12:02:59.134774 8564 sgd_solver.cpp:106] Iteration 37400, lr = 1e-05
I1022 12:03:10.289475 8564 solver.cpp:236] Iteration 37500, loss = 0.722203
I1022 12:03:10.289512 8564 solver.cpp:252] Train net output #0: loss = 0.722203 (* 1 = 0.722203 loss)
I1022 12:03:10.289518 8564 sgd_solver.cpp:106] Iteration 37500, lr = 1e-05
I1022 12:03:19.174435 8564 solver.cpp:236] Iteration 37600, loss = 0.57808
I1022 12:03:19.174460 8564 solver.cpp:252] Train net output #0: loss = 0.57808 (* 1 = 0.57808 loss)
I1022 12:03:19.174465 8564 sgd_solver.cpp:106] Iteration 37600, lr = 1e-05
I1022 12:03:26.015373 8564 solver.cpp:236] Iteration 37700, loss = 0.808847
I1022 12:03:26.015408 8564 solver.cpp:252] Train net output #0: loss = 0.808847 (* 1 = 0.808847 loss)
I1022 12:03:26.015414 8564 sgd_solver.cpp:106] Iteration 37700, lr = 1e-05
I1022 12:03:35.227355 8564 solver.cpp:236] Iteration 37800, loss = 0.689611
I1022 12:03:35.227442 8564 solver.cpp:252] Train net output #0: loss = 0.689611 (* 1 = 0.689611 loss)
I1022 12:03:35.227447 8564 sgd_solver.cpp:106] Iteration 37800, lr = 1e-05
I1022 12:03:44.473572 8564 solver.cpp:236] Iteration 37900, loss = 0.625647
I1022 12:03:44.473598 8564 solver.cpp:252] Train net output #0: loss = 0.625647 (* 1 = 0.625647 loss)
I1022 12:03:44.473603 8564 sgd_solver.cpp:106] Iteration 37900, lr = 1e-05
I1022 12:03:50.886652 8564 solver.cpp:340] Iteration 38000, Testing net (#0)
I1022 12:03:53.312328 8564 solver.cpp:408] Test net output #0: accuracy = 0.7346
I1022 12:03:53.312361 8564 solver.cpp:408] Test net output #1: loss = 0.784374 (* 1 = 0.784374 loss)
I1022 12:03:53.341193 8564 solver.cpp:236] Iteration 38000, loss = 0.721833
I1022 12:03:53.341214 8564 solver.cpp:252] Train net output #0: loss = 0.721833 (* 1 = 0.721833 loss)
I1022 12:03:53.341219 8564 sgd_solver.cpp:106] Iteration 38000, lr = 1e-05
I1022 12:03:58.522460 8564 solver.cpp:236] Iteration 38100, loss = 0.577827
I1022 12:03:58.522500 8564 solver.cpp:252] Train net output #0: loss = 0.577827 (* 1 = 0.577827 loss)
I1022 12:03:58.522505 8564 sgd_solver.cpp:106] Iteration 38100, lr = 1e-05
I1022 12:04:06.463690 8564 solver.cpp:236] Iteration 38200, loss = 0.808664
I1022 12:04:06.463769 8564 solver.cpp:252] Train net output #0: loss = 0.808664 (* 1 = 0.808664 loss)
I1022 12:04:06.463783 8564 sgd_solver.cpp:106] Iteration 38200, lr = 1e-05
I1022 12:04:15.694309 8564 solver.cpp:236] Iteration 38300, loss = 0.689592
I1022 12:04:15.694337 8564 solver.cpp:252] Train net output #0: loss = 0.689592 (* 1 = 0.689592 loss)
I1022 12:04:15.694342 8564 sgd_solver.cpp:106] Iteration 38300, lr = 1e-05
I1022 12:04:24.921224 8564 solver.cpp:236] Iteration 38400, loss = 0.625694
I1022 12:04:24.921252 8564 solver.cpp:252] Train net output #0: loss = 0.625694 (* 1 = 0.625694 loss)
I1022 12:04:24.921257 8564 sgd_solver.cpp:106] Iteration 38400, lr = 1e-05
I1022 12:04:34.151561 8564 solver.cpp:236] Iteration 38500, loss = 0.721449
I1022 12:04:34.151587 8564 solver.cpp:252] Train net output #0: loss = 0.721449 (* 1 = 0.721449 loss)
I1022 12:04:34.151592 8564 sgd_solver.cpp:106] Iteration 38500, lr = 1e-05
I1022 12:04:39.363477 8564 solver.cpp:236] Iteration 38600, loss = 0.577588
I1022 12:04:39.363580 8564 solver.cpp:252] Train net output #0: loss = 0.577588 (* 1 = 0.577588 loss)
I1022 12:04:39.363586 8564 sgd_solver.cpp:106] Iteration 38600, lr = 1e-05
I1022 12:04:44.483985 8564 solver.cpp:236] Iteration 38700, loss = 0.808506
I1022 12:04:44.484009 8564 solver.cpp:252] Train net output #0: loss = 0.808506 (* 1 = 0.808506 loss)
I1022 12:04:44.484014 8564 sgd_solver.cpp:106] Iteration 38700, lr = 1e-05
I1022 12:04:49.612747 8564 solver.cpp:236] Iteration 38800, loss = 0.689549
I1022 12:04:49.612782 8564 solver.cpp:252] Train net output #0: loss = 0.689549 (* 1 = 0.689549 loss)
I1022 12:04:49.612787 8564 sgd_solver.cpp:106] Iteration 38800, lr = 1e-05
I1022 12:04:54.751665 8564 solver.cpp:236] Iteration 38900, loss = 0.625724
I1022 12:04:54.751689 8564 solver.cpp:252] Train net output #0: loss = 0.625724 (* 1 = 0.625724 loss)
I1022 12:04:54.751693 8564 sgd_solver.cpp:106] Iteration 38900, lr = 1e-05
I1022 12:05:02.868448 8564 solver.cpp:340] Iteration 39000, Testing net (#0)
I1022 12:05:05.362264 8564 solver.cpp:408] Test net output #0: accuracy = 0.7345
I1022 12:05:05.362287 8564 solver.cpp:408] Test net output #1: loss = 0.783969 (* 1 = 0.783969 loss)
I1022 12:05:05.390991 8564 solver.cpp:236] Iteration 39000, loss = 0.721051
I1022 12:05:05.391011 8564 solver.cpp:252] Train net output #0: loss = 0.721051 (* 1 = 0.721051 loss)
I1022 12:05:05.391016 8564 sgd_solver.cpp:106] Iteration 39000, lr = 1e-05
I1022 12:05:10.499817 8564 solver.cpp:236] Iteration 39100, loss = 0.577356
I1022 12:05:10.499923 8564 solver.cpp:252] Train net output #0: loss = 0.577356 (* 1 = 0.577356 loss)
I1022 12:05:10.499928 8564 sgd_solver.cpp:106] Iteration 39100, lr = 1e-05
I1022 12:05:15.609794 8564 solver.cpp:236] Iteration 39200, loss = 0.808365
I1022 12:05:15.609818 8564 solver.cpp:252] Train net output #0: loss = 0.808365 (* 1 = 0.808365 loss)
I1022 12:05:15.609823 8564 sgd_solver.cpp:106] Iteration 39200, lr = 1e-05
I1022 12:05:20.723965 8564 solver.cpp:236] Iteration 39300, loss = 0.689493
I1022 12:05:20.724001 8564 solver.cpp:252] Train net output #0: loss = 0.689493 (* 1 = 0.689493 loss)
I1022 12:05:20.724006 8564 sgd_solver.cpp:106] Iteration 39300, lr = 1e-05
I1022 12:05:25.915421 8564 solver.cpp:236] Iteration 39400, loss = 0.625735
I1022 12:05:25.915446 8564 solver.cpp:252] Train net output #0: loss = 0.625735 (* 1 = 0.625735 loss)
I1022 12:05:25.915451 8564 sgd_solver.cpp:106] Iteration 39400, lr = 1e-05
I1022 12:05:31.056268 8564 solver.cpp:236] Iteration 39500, loss = 0.720644
I1022 12:05:31.056306 8564 solver.cpp:252] Train net output #0: loss = 0.720644 (* 1 = 0.720644 loss)
I1022 12:05:31.056311 8564 sgd_solver.cpp:106] Iteration 39500, lr = 1e-05
I1022 12:05:36.230237 8564 solver.cpp:236] Iteration 39600, loss = 0.577124
I1022 12:05:36.230263 8564 solver.cpp:252] Train net output #0: loss = 0.577124 (* 1 = 0.577124 loss)
I1022 12:05:36.230268 8564 sgd_solver.cpp:106] Iteration 39600, lr = 1e-05
I1022 12:05:41.367669 8564 solver.cpp:236] Iteration 39700, loss = 0.808229
I1022 12:05:41.367772 8564 solver.cpp:252] Train net output #0: loss = 0.808229 (* 1 = 0.808229 loss)
I1022 12:05:41.367777 8564 sgd_solver.cpp:106] Iteration 39700, lr = 1e-05
I1022 12:05:47.132261 8564 solver.cpp:236] Iteration 39800, loss = 0.689421
I1022 12:05:47.132287 8564 solver.cpp:252] Train net output #0: loss = 0.689421 (* 1 = 0.689421 loss)
I1022 12:05:47.132292 8564 sgd_solver.cpp:106] Iteration 39800, lr = 1e-05
I1022 12:05:52.247568 8564 solver.cpp:236] Iteration 39900, loss = 0.625732
I1022 12:05:52.247591 8564 solver.cpp:252] Train net output #0: loss = 0.625732 (* 1 = 0.625732 loss)
I1022 12:05:52.247596 8564 sgd_solver.cpp:106] Iteration 39900, lr = 1e-05
I1022 12:05:57.339275 8564 solver.cpp:461] Snapshotting to binary proto file examples/cifar10_full_sigmoid_bn_iter_40000.caffemodel
I1022 12:05:57.340836 8564 sgd_solver.cpp:269] Snapshotting solver state to binary proto file examples/cifar10_full_sigmoid_bn_iter_40000.solverstate
I1022 12:05:57.365314 8564 solver.cpp:320] Iteration 40000, loss = 0.720238
I1022 12:05:57.365331 8564 solver.cpp:340] Iteration 40000, Testing net (#0)
I1022 12:05:59.749449 8564 solver.cpp:408] Test net output #0: accuracy = 0.7344
I1022 12:05:59.749474 8564 solver.cpp:408] Test net output #1: loss = 0.783632 (* 1 = 0.783632 loss)
I1022 12:05:59.749477 8564 solver.cpp:325] Optimization Done.
I1022 12:05:59.749480 8564 caffe.cpp:215] Optimization Done.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment