Skip to content

Instantly share code, notes, and snippets.

@erogol
Created February 17, 2015 22:17
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save erogol/5c5b7beae8b088bd00e9 to your computer and use it in GitHub Desktop.
RMSprop output
I0218 00:09:37.517771 18287 caffe.cpp:99] Use GPU with device ID 0
I0218 00:09:37.842236 18287 caffe.cpp:107] Starting Optimization
I0218 00:09:37.842380 18287 solver.cpp:32] Initializing solver from parameters:
test_iter: 100
test_interval: 500
base_lr: 0.01
display: 100
max_iter: 10000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 5000
snapshot_prefix: "examples/mnist/lenet_rmsprop"
solver_mode: GPU
net: "examples/mnist/lenet_train_test.prototxt"
solver_type: RMSPROP
rms_decay: 0.95
I0218 00:09:37.842483 18287 solver.cpp:70] Creating training net from net file: examples/mnist/lenet_train_test.prototxt
I0218 00:09:37.842988 18287 net.cpp:260] The NetState phase (0) differed from the phase (1) specified by a rule in layer mnist
I0218 00:09:37.843019 18287 net.cpp:260] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0218 00:09:37.843130 18287 net.cpp:39] Initializing net from parameters:
name: "LeNet"
state {
phase: TRAIN
}
layer {
name: "mnist"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/mnist/mnist_train_lmdb"
batch_size: 64
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I0218 00:09:37.843708 18287 layer_factory.hpp:74] Creating layer mnist
I0218 00:09:37.843739 18287 net.cpp:69] Creating Layer mnist
I0218 00:09:37.843752 18287 net.cpp:341] mnist -> data
I0218 00:09:37.843786 18287 net.cpp:341] mnist -> label
I0218 00:09:37.843802 18287 net.cpp:98] Setting up mnist
I0218 00:09:37.843919 18287 db.cpp:34] Opened lmdb examples/mnist/mnist_train_lmdb
I0218 00:09:37.843979 18287 data_layer.cpp:65] output data size: 64,1,28,28
I0218 00:09:37.844151 18287 net.cpp:105] Top shape: 64 1 28 28 (50176)
I0218 00:09:37.844172 18287 net.cpp:105] Top shape: 64 1 1 1 (64)
I0218 00:09:37.844182 18287 layer_factory.hpp:74] Creating layer conv1
I0218 00:09:37.844202 18287 net.cpp:69] Creating Layer conv1
I0218 00:09:37.844213 18287 net.cpp:379] conv1 <- data
I0218 00:09:37.844243 18287 net.cpp:341] conv1 -> conv1
I0218 00:09:37.844265 18287 net.cpp:98] Setting up conv1
I0218 00:09:37.844705 18287 net.cpp:105] Top shape: 64 20 24 24 (737280)
I0218 00:09:37.844751 18287 layer_factory.hpp:74] Creating layer pool1
I0218 00:09:37.844769 18287 net.cpp:69] Creating Layer pool1
I0218 00:09:37.844779 18287 net.cpp:379] pool1 <- conv1
I0218 00:09:37.844795 18287 net.cpp:341] pool1 -> pool1
I0218 00:09:37.844810 18287 net.cpp:98] Setting up pool1
I0218 00:09:37.844857 18287 net.cpp:105] Top shape: 64 20 12 12 (184320)
I0218 00:09:37.844871 18287 layer_factory.hpp:74] Creating layer conv2
I0218 00:09:37.844892 18287 net.cpp:69] Creating Layer conv2
I0218 00:09:37.844902 18287 net.cpp:379] conv2 <- pool1
I0218 00:09:37.844919 18287 net.cpp:341] conv2 -> conv2
I0218 00:09:37.844941 18287 net.cpp:98] Setting up conv2
I0218 00:09:37.845322 18287 net.cpp:105] Top shape: 64 50 8 8 (204800)
I0218 00:09:37.845446 18287 layer_factory.hpp:74] Creating layer pool2
I0218 00:09:37.845494 18287 net.cpp:69] Creating Layer pool2
I0218 00:09:37.845520 18287 net.cpp:379] pool2 <- conv2
I0218 00:09:37.845557 18287 net.cpp:341] pool2 -> pool2
I0218 00:09:37.845594 18287 net.cpp:98] Setting up pool2
I0218 00:09:37.845624 18287 net.cpp:105] Top shape: 64 50 4 4 (51200)
I0218 00:09:37.845645 18287 layer_factory.hpp:74] Creating layer ip1
I0218 00:09:37.845717 18287 net.cpp:69] Creating Layer ip1
I0218 00:09:37.845747 18287 net.cpp:379] ip1 <- pool2
I0218 00:09:37.845783 18287 net.cpp:341] ip1 -> ip1
I0218 00:09:37.845816 18287 net.cpp:98] Setting up ip1
I0218 00:09:37.851008 18287 net.cpp:105] Top shape: 64 500 1 1 (32000)
I0218 00:09:37.851064 18287 layer_factory.hpp:74] Creating layer relu1
I0218 00:09:37.851085 18287 net.cpp:69] Creating Layer relu1
I0218 00:09:37.851097 18287 net.cpp:379] relu1 <- ip1
I0218 00:09:37.851114 18287 net.cpp:330] relu1 -> ip1 (in-place)
I0218 00:09:37.851130 18287 net.cpp:98] Setting up relu1
I0218 00:09:37.851143 18287 net.cpp:105] Top shape: 64 500 1 1 (32000)
I0218 00:09:37.851153 18287 layer_factory.hpp:74] Creating layer ip2
I0218 00:09:37.851171 18287 net.cpp:69] Creating Layer ip2
I0218 00:09:37.851181 18287 net.cpp:379] ip2 <- ip1
I0218 00:09:37.851194 18287 net.cpp:341] ip2 -> ip2
I0218 00:09:37.851209 18287 net.cpp:98] Setting up ip2
I0218 00:09:37.851282 18287 net.cpp:105] Top shape: 64 10 1 1 (640)
I0218 00:09:37.851300 18287 layer_factory.hpp:74] Creating layer loss
I0218 00:09:37.851318 18287 net.cpp:69] Creating Layer loss
I0218 00:09:37.851330 18287 net.cpp:379] loss <- ip2
I0218 00:09:37.851339 18287 net.cpp:379] loss <- label
I0218 00:09:37.851351 18287 net.cpp:341] loss -> loss
I0218 00:09:37.851367 18287 net.cpp:98] Setting up loss
I0218 00:09:37.851382 18287 layer_factory.hpp:74] Creating layer loss
I0218 00:09:37.851408 18287 net.cpp:105] Top shape: 1 1 1 1 (1)
I0218 00:09:37.851418 18287 net.cpp:111] with loss weight 1
I0218 00:09:37.851461 18287 net.cpp:156] loss needs backward computation.
I0218 00:09:37.851471 18287 net.cpp:156] ip2 needs backward computation.
I0218 00:09:37.851480 18287 net.cpp:156] relu1 needs backward computation.
I0218 00:09:37.851488 18287 net.cpp:156] ip1 needs backward computation.
I0218 00:09:37.851497 18287 net.cpp:156] pool2 needs backward computation.
I0218 00:09:37.851505 18287 net.cpp:156] conv2 needs backward computation.
I0218 00:09:37.851515 18287 net.cpp:156] pool1 needs backward computation.
I0218 00:09:37.851523 18287 net.cpp:156] conv1 needs backward computation.
I0218 00:09:37.851532 18287 net.cpp:158] mnist does not need backward computation.
I0218 00:09:37.851541 18287 net.cpp:194] This network produces output loss
I0218 00:09:37.851560 18287 net.cpp:453] Collecting Learning Rate and Weight Decay.
I0218 00:09:37.851573 18287 net.cpp:206] Network initialization done.
I0218 00:09:37.851583 18287 net.cpp:207] Memory required for data: 5169924
I0218 00:09:37.852119 18287 solver.cpp:154] Creating test net (#0) specified by net file: examples/mnist/lenet_train_test.prototxt
I0218 00:09:37.852161 18287 net.cpp:260] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist
I0218 00:09:37.852298 18287 net.cpp:39] Initializing net from parameters:
name: "LeNet"
state {
phase: TEST
}
layer {
name: "mnist"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/mnist/mnist_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I0218 00:09:37.852856 18287 layer_factory.hpp:74] Creating layer mnist
I0218 00:09:37.852877 18287 net.cpp:69] Creating Layer mnist
I0218 00:09:37.852890 18287 net.cpp:341] mnist -> data
I0218 00:09:37.852905 18287 net.cpp:341] mnist -> label
I0218 00:09:37.852919 18287 net.cpp:98] Setting up mnist
I0218 00:09:37.853029 18287 db.cpp:34] Opened lmdb examples/mnist/mnist_test_lmdb
I0218 00:09:37.853087 18287 data_layer.cpp:65] output data size: 100,1,28,28
I0218 00:09:37.853464 18287 net.cpp:105] Top shape: 100 1 28 28 (78400)
I0218 00:09:37.853485 18287 net.cpp:105] Top shape: 100 1 1 1 (100)
I0218 00:09:37.853498 18287 layer_factory.hpp:74] Creating layer label_mnist_1_split
I0218 00:09:37.853516 18287 net.cpp:69] Creating Layer label_mnist_1_split
I0218 00:09:37.853528 18287 net.cpp:379] label_mnist_1_split <- label
I0218 00:09:37.853554 18287 net.cpp:341] label_mnist_1_split -> label_mnist_1_split_0
I0218 00:09:37.853577 18287 net.cpp:341] label_mnist_1_split -> label_mnist_1_split_1
I0218 00:09:37.853595 18287 net.cpp:98] Setting up label_mnist_1_split
I0218 00:09:37.853615 18287 net.cpp:105] Top shape: 100 1 1 1 (100)
I0218 00:09:37.853628 18287 net.cpp:105] Top shape: 100 1 1 1 (100)
I0218 00:09:37.853641 18287 layer_factory.hpp:74] Creating layer conv1
I0218 00:09:37.853663 18287 net.cpp:69] Creating Layer conv1
I0218 00:09:37.853677 18287 net.cpp:379] conv1 <- data
I0218 00:09:37.853693 18287 net.cpp:341] conv1 -> conv1
I0218 00:09:37.853709 18287 net.cpp:98] Setting up conv1
I0218 00:09:37.853749 18287 net.cpp:105] Top shape: 100 20 24 24 (1152000)
I0218 00:09:37.853772 18287 layer_factory.hpp:74] Creating layer pool1
I0218 00:09:37.853788 18287 net.cpp:69] Creating Layer pool1
I0218 00:09:37.853801 18287 net.cpp:379] pool1 <- conv1
I0218 00:09:37.853819 18287 net.cpp:341] pool1 -> pool1
I0218 00:09:37.853835 18287 net.cpp:98] Setting up pool1
I0218 00:09:37.853852 18287 net.cpp:105] Top shape: 100 20 12 12 (288000)
I0218 00:09:37.853864 18287 layer_factory.hpp:74] Creating layer conv2
I0218 00:09:37.853881 18287 net.cpp:69] Creating Layer conv2
I0218 00:09:37.853893 18287 net.cpp:379] conv2 <- pool1
I0218 00:09:37.853912 18287 net.cpp:341] conv2 -> conv2
I0218 00:09:37.853930 18287 net.cpp:98] Setting up conv2
I0218 00:09:37.854212 18287 net.cpp:105] Top shape: 100 50 8 8 (320000)
I0218 00:09:37.854231 18287 layer_factory.hpp:74] Creating layer pool2
I0218 00:09:37.854262 18287 net.cpp:69] Creating Layer pool2
I0218 00:09:37.854272 18287 net.cpp:379] pool2 <- conv2
I0218 00:09:37.854285 18287 net.cpp:341] pool2 -> pool2
I0218 00:09:37.854297 18287 net.cpp:98] Setting up pool2
I0218 00:09:37.854308 18287 net.cpp:105] Top shape: 100 50 4 4 (80000)
I0218 00:09:37.854317 18287 layer_factory.hpp:74] Creating layer ip1
I0218 00:09:37.854333 18287 net.cpp:69] Creating Layer ip1
I0218 00:09:37.854343 18287 net.cpp:379] ip1 <- pool2
I0218 00:09:37.854357 18287 net.cpp:341] ip1 -> ip1
I0218 00:09:37.854372 18287 net.cpp:98] Setting up ip1
I0218 00:09:37.859522 18287 net.cpp:105] Top shape: 100 500 1 1 (50000)
I0218 00:09:37.859562 18287 layer_factory.hpp:74] Creating layer relu1
I0218 00:09:37.859580 18287 net.cpp:69] Creating Layer relu1
I0218 00:09:37.859592 18287 net.cpp:379] relu1 <- ip1
I0218 00:09:37.859616 18287 net.cpp:330] relu1 -> ip1 (in-place)
I0218 00:09:37.859637 18287 net.cpp:98] Setting up relu1
I0218 00:09:37.859649 18287 net.cpp:105] Top shape: 100 500 1 1 (50000)
I0218 00:09:37.859661 18287 layer_factory.hpp:74] Creating layer ip2
I0218 00:09:37.859680 18287 net.cpp:69] Creating Layer ip2
I0218 00:09:37.859694 18287 net.cpp:379] ip2 <- ip1
I0218 00:09:37.859714 18287 net.cpp:341] ip2 -> ip2
I0218 00:09:37.859733 18287 net.cpp:98] Setting up ip2
I0218 00:09:37.859820 18287 net.cpp:105] Top shape: 100 10 1 1 (1000)
I0218 00:09:37.859843 18287 layer_factory.hpp:74] Creating layer ip2_ip2_0_split
I0218 00:09:37.859858 18287 net.cpp:69] Creating Layer ip2_ip2_0_split
I0218 00:09:37.859872 18287 net.cpp:379] ip2_ip2_0_split <- ip2
I0218 00:09:37.859904 18287 net.cpp:341] ip2_ip2_0_split -> ip2_ip2_0_split_0
I0218 00:09:37.859922 18287 net.cpp:341] ip2_ip2_0_split -> ip2_ip2_0_split_1
I0218 00:09:37.859936 18287 net.cpp:98] Setting up ip2_ip2_0_split
I0218 00:09:37.859947 18287 net.cpp:105] Top shape: 100 10 1 1 (1000)
I0218 00:09:37.859957 18287 net.cpp:105] Top shape: 100 10 1 1 (1000)
I0218 00:09:37.859971 18287 layer_factory.hpp:74] Creating layer accuracy
I0218 00:09:37.859987 18287 net.cpp:69] Creating Layer accuracy
I0218 00:09:37.859997 18287 net.cpp:379] accuracy <- ip2_ip2_0_split_0
I0218 00:09:37.860009 18287 net.cpp:379] accuracy <- label_mnist_1_split_0
I0218 00:09:37.860021 18287 net.cpp:341] accuracy -> accuracy
I0218 00:09:37.860038 18287 net.cpp:98] Setting up accuracy
I0218 00:09:37.860055 18287 net.cpp:105] Top shape: 1 1 1 1 (1)
I0218 00:09:37.860065 18287 layer_factory.hpp:74] Creating layer loss
I0218 00:09:37.860080 18287 net.cpp:69] Creating Layer loss
I0218 00:09:37.860090 18287 net.cpp:379] loss <- ip2_ip2_0_split_1
I0218 00:09:37.860101 18287 net.cpp:379] loss <- label_mnist_1_split_1
I0218 00:09:37.860113 18287 net.cpp:341] loss -> loss
I0218 00:09:37.860126 18287 net.cpp:98] Setting up loss
I0218 00:09:37.860137 18287 layer_factory.hpp:74] Creating layer loss
I0218 00:09:37.860157 18287 net.cpp:105] Top shape: 1 1 1 1 (1)
I0218 00:09:37.860168 18287 net.cpp:111] with loss weight 1
I0218 00:09:37.860188 18287 net.cpp:156] loss needs backward computation.
I0218 00:09:37.860198 18287 net.cpp:158] accuracy does not need backward computation.
I0218 00:09:37.860210 18287 net.cpp:156] ip2_ip2_0_split needs backward computation.
I0218 00:09:37.860224 18287 net.cpp:156] ip2 needs backward computation.
I0218 00:09:37.860234 18287 net.cpp:156] relu1 needs backward computation.
I0218 00:09:37.860244 18287 net.cpp:156] ip1 needs backward computation.
I0218 00:09:37.860254 18287 net.cpp:156] pool2 needs backward computation.
I0218 00:09:37.860263 18287 net.cpp:156] conv2 needs backward computation.
I0218 00:09:37.860272 18287 net.cpp:156] pool1 needs backward computation.
I0218 00:09:37.860285 18287 net.cpp:156] conv1 needs backward computation.
I0218 00:09:37.860299 18287 net.cpp:158] label_mnist_1_split does not need backward computation.
I0218 00:09:37.860312 18287 net.cpp:158] mnist does not need backward computation.
I0218 00:09:37.860326 18287 net.cpp:194] This network produces output accuracy
I0218 00:09:37.860352 18287 net.cpp:194] This network produces output loss
I0218 00:09:37.860374 18287 net.cpp:453] Collecting Learning Rate and Weight Decay.
I0218 00:09:37.860388 18287 net.cpp:206] Network initialization done.
I0218 00:09:37.860396 18287 net.cpp:207] Memory required for data: 8086808
I0218 00:09:37.860460 18287 solver.cpp:42] Solver scaffolding done.
I0218 00:09:37.860523 18287 solver.cpp:223] Solving LeNet
I0218 00:09:37.860538 18287 solver.cpp:224] Learning Rate Policy: inv
I0218 00:09:37.860555 18287 solver.cpp:267] Iteration 0, Testing net (#0)
I0218 00:09:39.645195 18287 solver.cpp:318] Test net output #0: accuracy = 0.1274
I0218 00:09:39.645300 18287 solver.cpp:318] Test net output #1: loss = 2.30252 (* 1 = 2.30252 loss)
I0218 00:09:39.665407 18287 solver.cpp:189] Iteration 0, loss = 2.3024
I0218 00:09:39.665518 18287 solver.cpp:204] Train net output #0: loss = 2.3024 (* 1 = 2.3024 loss)
I0218 00:09:39.665583 18287 solver.cpp:865] Iteration 0, lr = 0.01
I0218 00:09:42.189071 18287 solver.cpp:189] Iteration 100, loss = 0.2473
I0218 00:09:42.189152 18287 solver.cpp:204] Train net output #0: loss = 0.2473 (* 1 = 0.2473 loss)
I0218 00:09:42.189170 18287 solver.cpp:865] Iteration 100, lr = 0.00992565
I0218 00:09:44.723381 18287 solver.cpp:189] Iteration 200, loss = 0.300951
I0218 00:09:44.723459 18287 solver.cpp:204] Train net output #0: loss = 0.300949 (* 1 = 0.300949 loss)
I0218 00:09:44.723475 18287 solver.cpp:865] Iteration 200, lr = 0.00985258
I0218 00:09:47.255417 18287 solver.cpp:189] Iteration 300, loss = 1.77826
I0218 00:09:47.255523 18287 solver.cpp:204] Train net output #0: loss = 1.77826 (* 1 = 1.77826 loss)
I0218 00:09:47.255542 18287 solver.cpp:865] Iteration 300, lr = 0.00978075
I0218 00:09:49.787093 18287 solver.cpp:189] Iteration 400, loss = 17.9541
I0218 00:09:49.787191 18287 solver.cpp:204] Train net output #0: loss = 17.954 (* 1 = 17.954 loss)
I0218 00:09:49.787214 18287 solver.cpp:865] Iteration 400, lr = 0.00971013
I0218 00:09:52.288933 18287 solver.cpp:267] Iteration 500, Testing net (#0)
I0218 00:09:54.080185 18287 solver.cpp:318] Test net output #0: accuracy = 0.9315
I0218 00:09:54.080265 18287 solver.cpp:318] Test net output #1: loss = 0.275751 (* 1 = 0.275751 loss)
I0218 00:09:54.094460 18287 solver.cpp:189] Iteration 500, loss = 0.174601
I0218 00:09:54.094518 18287 solver.cpp:204] Train net output #0: loss = 0.174593 (* 1 = 0.174593 loss)
I0218 00:09:54.094580 18287 solver.cpp:865] Iteration 500, lr = 0.00964069
I0218 00:09:56.622186 18287 solver.cpp:189] Iteration 600, loss = 0.157269
I0218 00:09:56.622313 18287 solver.cpp:204] Train net output #0: loss = 0.157261 (* 1 = 0.157261 loss)
I0218 00:09:56.622344 18287 solver.cpp:865] Iteration 600, lr = 0.0095724
I0218 00:09:59.149488 18287 solver.cpp:189] Iteration 700, loss = 0.196144
I0218 00:09:59.149572 18287 solver.cpp:204] Train net output #0: loss = 0.196136 (* 1 = 0.196136 loss)
I0218 00:09:59.149590 18287 solver.cpp:865] Iteration 700, lr = 0.00950522
I0218 00:10:01.681129 18287 solver.cpp:189] Iteration 800, loss = 0.435183
I0218 00:10:01.681221 18287 solver.cpp:204] Train net output #0: loss = 0.435176 (* 1 = 0.435176 loss)
I0218 00:10:01.681248 18287 solver.cpp:865] Iteration 800, lr = 0.00943913
I0218 00:10:04.207733 18287 solver.cpp:189] Iteration 900, loss = 1.8259
I0218 00:10:04.207836 18287 solver.cpp:204] Train net output #0: loss = 1.8259 (* 1 = 1.8259 loss)
I0218 00:10:04.207869 18287 solver.cpp:865] Iteration 900, lr = 0.00937411
I0218 00:10:06.712781 18287 solver.cpp:267] Iteration 1000, Testing net (#0)
I0218 00:10:08.504279 18287 solver.cpp:318] Test net output #0: accuracy = 0.9405
I0218 00:10:08.504418 18287 solver.cpp:318] Test net output #1: loss = 0.372385 (* 1 = 0.372385 loss)
I0218 00:10:08.519754 18287 solver.cpp:189] Iteration 1000, loss = 0.343562
I0218 00:10:08.519826 18287 solver.cpp:204] Train net output #0: loss = 0.343554 (* 1 = 0.343554 loss)
I0218 00:10:08.519852 18287 solver.cpp:865] Iteration 1000, lr = 0.00931012
I0218 00:10:11.051611 18287 solver.cpp:189] Iteration 1100, loss = 0.0122096
I0218 00:10:11.051688 18287 solver.cpp:204] Train net output #0: loss = 0.0122015 (* 1 = 0.0122015 loss)
I0218 00:10:11.051708 18287 solver.cpp:865] Iteration 1100, lr = 0.00924715
I0218 00:10:13.583997 18287 solver.cpp:189] Iteration 1200, loss = 0.000749707
I0218 00:10:13.584076 18287 solver.cpp:204] Train net output #0: loss = 0.000741967 (* 1 = 0.000741967 loss)
I0218 00:10:13.584094 18287 solver.cpp:865] Iteration 1200, lr = 0.00918515
I0218 00:10:16.115699 18287 solver.cpp:189] Iteration 1300, loss = 0.0887985
I0218 00:10:16.115772 18287 solver.cpp:204] Train net output #0: loss = 0.0887906 (* 1 = 0.0887906 loss)
I0218 00:10:16.115790 18287 solver.cpp:865] Iteration 1300, lr = 0.00912412
I0218 00:10:18.646993 18287 solver.cpp:189] Iteration 1400, loss = 0.0200622
I0218 00:10:18.647094 18287 solver.cpp:204] Train net output #0: loss = 0.0200546 (* 1 = 0.0200546 loss)
I0218 00:10:18.647111 18287 solver.cpp:865] Iteration 1400, lr = 0.00906403
I0218 00:10:21.149834 18287 solver.cpp:267] Iteration 1500, Testing net (#0)
I0218 00:10:22.943310 18287 solver.cpp:318] Test net output #0: accuracy = 0.9602
I0218 00:10:22.943404 18287 solver.cpp:318] Test net output #1: loss = 0.465681 (* 1 = 0.465681 loss)
I0218 00:10:22.959288 18287 solver.cpp:189] Iteration 1500, loss = 0.522469
I0218 00:10:22.959344 18287 solver.cpp:204] Train net output #0: loss = 0.522457 (* 1 = 0.522457 loss)
I0218 00:10:22.959372 18287 solver.cpp:865] Iteration 1500, lr = 0.00900485
I0218 00:10:25.492070 18287 solver.cpp:189] Iteration 1600, loss = 1.2733
I0218 00:10:25.492156 18287 solver.cpp:204] Train net output #0: loss = 1.27329 (* 1 = 1.27329 loss)
I0218 00:10:25.492178 18287 solver.cpp:865] Iteration 1600, lr = 0.00894657
I0218 00:10:28.024866 18287 solver.cpp:189] Iteration 1700, loss = 0.155142
I0218 00:10:28.024951 18287 solver.cpp:204] Train net output #0: loss = 0.155129 (* 1 = 0.155129 loss)
I0218 00:10:28.024973 18287 solver.cpp:865] Iteration 1700, lr = 0.00888916
I0218 00:10:30.557154 18287 solver.cpp:189] Iteration 1800, loss = 0.00107804
I0218 00:10:30.557225 18287 solver.cpp:204] Train net output #0: loss = 0.00106443 (* 1 = 0.00106443 loss)
I0218 00:10:30.557246 18287 solver.cpp:865] Iteration 1800, lr = 0.0088326
I0218 00:10:33.088204 18287 solver.cpp:189] Iteration 1900, loss = 0.235979
I0218 00:10:33.088266 18287 solver.cpp:204] Train net output #0: loss = 0.235966 (* 1 = 0.235966 loss)
I0218 00:10:33.088284 18287 solver.cpp:865] Iteration 1900, lr = 0.00877687
I0218 00:10:35.594615 18287 solver.cpp:267] Iteration 2000, Testing net (#0)
I0218 00:10:37.387531 18287 solver.cpp:318] Test net output #0: accuracy = 0.968
I0218 00:10:37.387603 18287 solver.cpp:318] Test net output #1: loss = 0.22081 (* 1 = 0.22081 loss)
I0218 00:10:37.402442 18287 solver.cpp:189] Iteration 2000, loss = 0.114105
I0218 00:10:37.402547 18287 solver.cpp:204] Train net output #0: loss = 0.114091 (* 1 = 0.114091 loss)
I0218 00:10:37.402595 18287 solver.cpp:865] Iteration 2000, lr = 0.00872196
I0218 00:10:39.936183 18287 solver.cpp:189] Iteration 2100, loss = 0.188322
I0218 00:10:39.936723 18287 solver.cpp:204] Train net output #0: loss = 0.188308 (* 1 = 0.188308 loss)
I0218 00:10:39.936761 18287 solver.cpp:865] Iteration 2100, lr = 0.00866784
I0218 00:10:42.467862 18287 solver.cpp:189] Iteration 2200, loss = 0.174363
I0218 00:10:42.467952 18287 solver.cpp:204] Train net output #0: loss = 0.174349 (* 1 = 0.174349 loss)
I0218 00:10:42.467972 18287 solver.cpp:865] Iteration 2200, lr = 0.0086145
I0218 00:10:45.001584 18287 solver.cpp:189] Iteration 2300, loss = 1.51028
I0218 00:10:45.001654 18287 solver.cpp:204] Train net output #0: loss = 1.51026 (* 1 = 1.51026 loss)
I0218 00:10:45.001670 18287 solver.cpp:865] Iteration 2300, lr = 0.00856192
I0218 00:10:47.532932 18287 solver.cpp:189] Iteration 2400, loss = 0.0571421
I0218 00:10:47.532999 18287 solver.cpp:204] Train net output #0: loss = 0.0571284 (* 1 = 0.0571284 loss)
I0218 00:10:47.533016 18287 solver.cpp:865] Iteration 2400, lr = 0.00851008
I0218 00:10:50.034396 18287 solver.cpp:267] Iteration 2500, Testing net (#0)
I0218 00:10:51.826880 18287 solver.cpp:318] Test net output #0: accuracy = 0.9502
I0218 00:10:51.826951 18287 solver.cpp:318] Test net output #1: loss = 0.316549 (* 1 = 0.316549 loss)
I0218 00:10:51.840864 18287 solver.cpp:189] Iteration 2500, loss = 0.35752
I0218 00:10:51.840948 18287 solver.cpp:204] Train net output #0: loss = 0.357506 (* 1 = 0.357506 loss)
I0218 00:10:51.840970 18287 solver.cpp:865] Iteration 2500, lr = 0.00845897
I0218 00:10:54.373668 18287 solver.cpp:189] Iteration 2600, loss = 0.323709
I0218 00:10:54.373757 18287 solver.cpp:204] Train net output #0: loss = 0.323695 (* 1 = 0.323695 loss)
I0218 00:10:54.373780 18287 solver.cpp:865] Iteration 2600, lr = 0.00840857
I0218 00:10:56.904428 18287 solver.cpp:189] Iteration 2700, loss = 2.8204
I0218 00:10:56.904497 18287 solver.cpp:204] Train net output #0: loss = 2.82038 (* 1 = 2.82038 loss)
I0218 00:10:56.904515 18287 solver.cpp:865] Iteration 2700, lr = 0.00835886
I0218 00:10:59.432222 18287 solver.cpp:189] Iteration 2800, loss = 1.28093e-05
I0218 00:10:59.432291 18287 solver.cpp:204] Train net output #0: loss = 3.1665e-08 (* 1 = 3.1665e-08 loss)
I0218 00:10:59.432308 18287 solver.cpp:865] Iteration 2800, lr = 0.00830984
I0218 00:11:01.961597 18287 solver.cpp:189] Iteration 2900, loss = 0.181283
I0218 00:11:01.961678 18287 solver.cpp:204] Train net output #0: loss = 0.181268 (* 1 = 0.181268 loss)
I0218 00:11:01.961697 18287 solver.cpp:865] Iteration 2900, lr = 0.00826148
I0218 00:11:04.469794 18287 solver.cpp:267] Iteration 3000, Testing net (#0)
I0218 00:11:06.260100 18287 solver.cpp:318] Test net output #0: accuracy = 0.9661
I0218 00:11:06.260192 18287 solver.cpp:318] Test net output #1: loss = 0.179078 (* 1 = 0.179078 loss)
I0218 00:11:06.275542 18287 solver.cpp:189] Iteration 3000, loss = 0.0122863
I0218 00:11:06.275601 18287 solver.cpp:204] Train net output #0: loss = 0.0122712 (* 1 = 0.0122712 loss)
I0218 00:11:06.275625 18287 solver.cpp:865] Iteration 3000, lr = 0.00821377
I0218 00:11:08.803006 18287 solver.cpp:189] Iteration 3100, loss = 0.304628
I0218 00:11:08.803081 18287 solver.cpp:204] Train net output #0: loss = 0.30461 (* 1 = 0.30461 loss)
I0218 00:11:08.803097 18287 solver.cpp:865] Iteration 3100, lr = 0.0081667
I0218 00:11:11.334816 18287 solver.cpp:189] Iteration 3200, loss = 0.0557076
I0218 00:11:11.335209 18287 solver.cpp:204] Train net output #0: loss = 0.0556894 (* 1 = 0.0556894 loss)
I0218 00:11:11.335230 18287 solver.cpp:865] Iteration 3200, lr = 0.00812025
I0218 00:11:13.870931 18287 solver.cpp:189] Iteration 3300, loss = 0.127604
I0218 00:11:13.871002 18287 solver.cpp:204] Train net output #0: loss = 0.127587 (* 1 = 0.127587 loss)
I0218 00:11:13.871019 18287 solver.cpp:865] Iteration 3300, lr = 0.00807442
I0218 00:11:16.398757 18287 solver.cpp:189] Iteration 3400, loss = 0.00277695
I0218 00:11:16.398834 18287 solver.cpp:204] Train net output #0: loss = 0.00275979 (* 1 = 0.00275979 loss)
I0218 00:11:16.398852 18287 solver.cpp:865] Iteration 3400, lr = 0.00802918
I0218 00:11:18.904160 18287 solver.cpp:267] Iteration 3500, Testing net (#0)
I0218 00:11:20.697602 18287 solver.cpp:318] Test net output #0: accuracy = 0.9635
I0218 00:11:20.697675 18287 solver.cpp:318] Test net output #1: loss = 0.162479 (* 1 = 0.162479 loss)
I0218 00:11:20.712347 18287 solver.cpp:189] Iteration 3500, loss = 0.043887
I0218 00:11:20.712414 18287 solver.cpp:204] Train net output #0: loss = 0.0438699 (* 1 = 0.0438699 loss)
I0218 00:11:20.712440 18287 solver.cpp:865] Iteration 3500, lr = 0.00798454
I0218 00:11:23.244554 18287 solver.cpp:189] Iteration 3600, loss = 0.723485
I0218 00:11:23.244673 18287 solver.cpp:204] Train net output #0: loss = 0.723469 (* 1 = 0.723469 loss)
I0218 00:11:23.244711 18287 solver.cpp:865] Iteration 3600, lr = 0.00794046
I0218 00:11:25.774935 18287 solver.cpp:189] Iteration 3700, loss = 0.321816
I0218 00:11:25.775010 18287 solver.cpp:204] Train net output #0: loss = 0.3218 (* 1 = 0.3218 loss)
I0218 00:11:25.775030 18287 solver.cpp:865] Iteration 3700, lr = 0.00789695
I0218 00:11:28.303608 18287 solver.cpp:189] Iteration 3800, loss = 0.0855286
I0218 00:11:28.303680 18287 solver.cpp:204] Train net output #0: loss = 0.085514 (* 1 = 0.085514 loss)
I0218 00:11:28.303700 18287 solver.cpp:865] Iteration 3800, lr = 0.007854
I0218 00:11:30.831974 18287 solver.cpp:189] Iteration 3900, loss = 0.189647
I0218 00:11:30.832047 18287 solver.cpp:204] Train net output #0: loss = 0.189632 (* 1 = 0.189632 loss)
I0218 00:11:30.832067 18287 solver.cpp:865] Iteration 3900, lr = 0.00781158
I0218 00:11:33.336210 18287 solver.cpp:267] Iteration 4000, Testing net (#0)
I0218 00:11:35.123931 18287 solver.cpp:318] Test net output #0: accuracy = 0.9707
I0218 00:11:35.124003 18287 solver.cpp:318] Test net output #1: loss = 0.125734 (* 1 = 0.125734 loss)
I0218 00:11:35.138012 18287 solver.cpp:189] Iteration 4000, loss = 0.183932
I0218 00:11:35.138044 18287 solver.cpp:204] Train net output #0: loss = 0.183918 (* 1 = 0.183918 loss)
I0218 00:11:35.138063 18287 solver.cpp:865] Iteration 4000, lr = 0.0077697
I0218 00:11:37.667994 18287 solver.cpp:189] Iteration 4100, loss = 0.154372
I0218 00:11:37.668052 18287 solver.cpp:204] Train net output #0: loss = 0.154358 (* 1 = 0.154358 loss)
I0218 00:11:37.668068 18287 solver.cpp:865] Iteration 4100, lr = 0.00772833
I0218 00:11:40.198734 18287 solver.cpp:189] Iteration 4200, loss = 0.0917956
I0218 00:11:40.198817 18287 solver.cpp:204] Train net output #0: loss = 0.0917812 (* 1 = 0.0917812 loss)
I0218 00:11:40.198837 18287 solver.cpp:865] Iteration 4200, lr = 0.00768748
I0218 00:11:42.732216 18287 solver.cpp:189] Iteration 4300, loss = 0.149174
I0218 00:11:42.732625 18287 solver.cpp:204] Train net output #0: loss = 0.149159 (* 1 = 0.149159 loss)
I0218 00:11:42.732645 18287 solver.cpp:865] Iteration 4300, lr = 0.00764712
I0218 00:11:45.263267 18287 solver.cpp:189] Iteration 4400, loss = 0.331196
I0218 00:11:45.263345 18287 solver.cpp:204] Train net output #0: loss = 0.331181 (* 1 = 0.331181 loss)
I0218 00:11:45.263360 18287 solver.cpp:865] Iteration 4400, lr = 0.00760726
I0218 00:11:47.770839 18287 solver.cpp:267] Iteration 4500, Testing net (#0)
I0218 00:11:49.560883 18287 solver.cpp:318] Test net output #0: accuracy = 0.9718
I0218 00:11:49.560957 18287 solver.cpp:318] Test net output #1: loss = 0.12872 (* 1 = 0.12872 loss)
I0218 00:11:49.574779 18287 solver.cpp:189] Iteration 4500, loss = 0.0764678
I0218 00:11:49.574842 18287 solver.cpp:204] Train net output #0: loss = 0.076453 (* 1 = 0.076453 loss)
I0218 00:11:49.574862 18287 solver.cpp:865] Iteration 4500, lr = 0.00756788
I0218 00:11:52.105761 18287 solver.cpp:189] Iteration 4600, loss = 0.000661626
I0218 00:11:52.105830 18287 solver.cpp:204] Train net output #0: loss = 0.00064672 (* 1 = 0.00064672 loss)
I0218 00:11:52.105849 18287 solver.cpp:865] Iteration 4600, lr = 0.00752897
I0218 00:11:54.638628 18287 solver.cpp:189] Iteration 4700, loss = 0.512471
I0218 00:11:54.638705 18287 solver.cpp:204] Train net output #0: loss = 0.512456 (* 1 = 0.512456 loss)
I0218 00:11:54.638725 18287 solver.cpp:865] Iteration 4700, lr = 0.00749052
I0218 00:11:57.171386 18287 solver.cpp:189] Iteration 4800, loss = 0.512168
I0218 00:11:57.171463 18287 solver.cpp:204] Train net output #0: loss = 0.512153 (* 1 = 0.512153 loss)
I0218 00:11:57.171484 18287 solver.cpp:865] Iteration 4800, lr = 0.00745253
I0218 00:11:59.701508 18287 solver.cpp:189] Iteration 4900, loss = 0.0307502
I0218 00:11:59.701597 18287 solver.cpp:204] Train net output #0: loss = 0.0307347 (* 1 = 0.0307347 loss)
I0218 00:11:59.701619 18287 solver.cpp:865] Iteration 4900, lr = 0.00741498
I0218 00:12:02.221755 18287 solver.cpp:338] Snapshotting to examples/mnist/lenet_rmsprop_iter_5000.caffemodel
I0218 00:12:02.229687 18287 solver.cpp:346] Snapshotting solver state to examples/mnist/lenet_rmsprop_iter_5000.solverstate
I0218 00:12:02.233674 18287 solver.cpp:267] Iteration 5000, Testing net (#0)
I0218 00:12:04.014539 18287 solver.cpp:318] Test net output #0: accuracy = 0.978
I0218 00:12:04.014608 18287 solver.cpp:318] Test net output #1: loss = 0.100376 (* 1 = 0.100376 loss)
I0218 00:12:04.030799 18287 solver.cpp:189] Iteration 5000, loss = 0.0759869
I0218 00:12:04.030868 18287 solver.cpp:204] Train net output #0: loss = 0.0759712 (* 1 = 0.0759712 loss)
I0218 00:12:04.030886 18287 solver.cpp:865] Iteration 5000, lr = 0.00737788
I0218 00:12:06.555861 18287 solver.cpp:189] Iteration 5100, loss = 0.0682653
I0218 00:12:06.555939 18287 solver.cpp:204] Train net output #0: loss = 0.06825 (* 1 = 0.06825 loss)
I0218 00:12:06.555959 18287 solver.cpp:865] Iteration 5100, lr = 0.0073412
I0218 00:12:09.088845 18287 solver.cpp:189] Iteration 5200, loss = 0.326372
I0218 00:12:09.088917 18287 solver.cpp:204] Train net output #0: loss = 0.326359 (* 1 = 0.326359 loss)
I0218 00:12:09.088937 18287 solver.cpp:865] Iteration 5200, lr = 0.00730495
I0218 00:12:11.617974 18287 solver.cpp:189] Iteration 5300, loss = 0.000935599
I0218 00:12:11.618062 18287 solver.cpp:204] Train net output #0: loss = 0.000922842 (* 1 = 0.000922842 loss)
I0218 00:12:11.618083 18287 solver.cpp:865] Iteration 5300, lr = 0.00726911
I0218 00:12:14.146375 18287 solver.cpp:189] Iteration 5400, loss = 0.445889
I0218 00:12:14.146827 18287 solver.cpp:204] Train net output #0: loss = 0.445876 (* 1 = 0.445876 loss)
I0218 00:12:14.146877 18287 solver.cpp:865] Iteration 5400, lr = 0.00723368
I0218 00:12:16.652461 18287 solver.cpp:267] Iteration 5500, Testing net (#0)
I0218 00:12:18.444108 18287 solver.cpp:318] Test net output #0: accuracy = 0.9713
I0218 00:12:18.444169 18287 solver.cpp:318] Test net output #1: loss = 0.17709 (* 1 = 0.17709 loss)
I0218 00:12:18.459504 18287 solver.cpp:189] Iteration 5500, loss = 0.0809034
I0218 00:12:18.459573 18287 solver.cpp:204] Train net output #0: loss = 0.0808908 (* 1 = 0.0808908 loss)
I0218 00:12:18.459594 18287 solver.cpp:865] Iteration 5500, lr = 0.00719865
I0218 00:12:20.990656 18287 solver.cpp:189] Iteration 5600, loss = 0.00201773
I0218 00:12:20.990727 18287 solver.cpp:204] Train net output #0: loss = 0.00200504 (* 1 = 0.00200504 loss)
I0218 00:12:20.990744 18287 solver.cpp:865] Iteration 5600, lr = 0.00716402
I0218 00:12:23.517288 18287 solver.cpp:189] Iteration 5700, loss = 0.00344253
I0218 00:12:23.517359 18287 solver.cpp:204] Train net output #0: loss = 0.00342977 (* 1 = 0.00342977 loss)
I0218 00:12:23.517375 18287 solver.cpp:865] Iteration 5700, lr = 0.00712977
I0218 00:12:26.047602 18287 solver.cpp:189] Iteration 5800, loss = 0.0777794
I0218 00:12:26.047675 18287 solver.cpp:204] Train net output #0: loss = 0.0777658 (* 1 = 0.0777658 loss)
I0218 00:12:26.047693 18287 solver.cpp:865] Iteration 5800, lr = 0.0070959
I0218 00:12:28.573331 18287 solver.cpp:189] Iteration 5900, loss = 0.0960398
I0218 00:12:28.573402 18287 solver.cpp:204] Train net output #0: loss = 0.0960263 (* 1 = 0.0960263 loss)
I0218 00:12:28.573420 18287 solver.cpp:865] Iteration 5900, lr = 0.0070624
I0218 00:12:31.080000 18287 solver.cpp:267] Iteration 6000, Testing net (#0)
I0218 00:12:32.871062 18287 solver.cpp:318] Test net output #0: accuracy = 0.9728
I0218 00:12:32.871163 18287 solver.cpp:318] Test net output #1: loss = 0.125642 (* 1 = 0.125642 loss)
I0218 00:12:32.887373 18287 solver.cpp:189] Iteration 6000, loss = 0.16997
I0218 00:12:32.887485 18287 solver.cpp:204] Train net output #0: loss = 0.169956 (* 1 = 0.169956 loss)
I0218 00:12:32.887522 18287 solver.cpp:865] Iteration 6000, lr = 0.00702927
I0218 00:12:35.416285 18287 solver.cpp:189] Iteration 6100, loss = 0.0242592
I0218 00:12:35.416354 18287 solver.cpp:204] Train net output #0: loss = 0.0242455 (* 1 = 0.0242455 loss)
I0218 00:12:35.416373 18287 solver.cpp:865] Iteration 6100, lr = 0.0069965
I0218 00:12:37.946943 18287 solver.cpp:189] Iteration 6200, loss = 0.269346
I0218 00:12:37.947021 18287 solver.cpp:204] Train net output #0: loss = 0.269332 (* 1 = 0.269332 loss)
I0218 00:12:37.947042 18287 solver.cpp:865] Iteration 6200, lr = 0.00696408
I0218 00:12:40.475997 18287 solver.cpp:189] Iteration 6300, loss = 0.136464
I0218 00:12:40.476070 18287 solver.cpp:204] Train net output #0: loss = 0.136449 (* 1 = 0.136449 loss)
I0218 00:12:40.476089 18287 solver.cpp:865] Iteration 6300, lr = 0.00693201
I0218 00:12:43.005038 18287 solver.cpp:189] Iteration 6400, loss = 4.16407
I0218 00:12:43.005100 18287 solver.cpp:204] Train net output #0: loss = 4.16406 (* 1 = 4.16406 loss)
I0218 00:12:43.005116 18287 solver.cpp:865] Iteration 6400, lr = 0.00690029
I0218 00:12:45.510337 18287 solver.cpp:267] Iteration 6500, Testing net (#0)
I0218 00:12:47.299037 18287 solver.cpp:318] Test net output #0: accuracy = 0.9734
I0218 00:12:47.299121 18287 solver.cpp:318] Test net output #1: loss = 0.119132 (* 1 = 0.119132 loss)
I0218 00:12:47.314934 18287 solver.cpp:189] Iteration 6500, loss = 0.147296
I0218 00:12:47.315024 18287 solver.cpp:204] Train net output #0: loss = 0.147282 (* 1 = 0.147282 loss)
I0218 00:12:47.315050 18287 solver.cpp:865] Iteration 6500, lr = 0.0068689
I0218 00:12:49.846565 18287 solver.cpp:189] Iteration 6600, loss = 0.384219
I0218 00:12:49.846642 18287 solver.cpp:204] Train net output #0: loss = 0.384207 (* 1 = 0.384207 loss)
I0218 00:12:49.846659 18287 solver.cpp:865] Iteration 6600, lr = 0.00683784
I0218 00:12:52.374325 18287 solver.cpp:189] Iteration 6700, loss = 0.116831
I0218 00:12:52.374399 18287 solver.cpp:204] Train net output #0: loss = 0.116819 (* 1 = 0.116819 loss)
I0218 00:12:52.374419 18287 solver.cpp:865] Iteration 6700, lr = 0.00680711
I0218 00:12:54.903542 18287 solver.cpp:189] Iteration 6800, loss = 0.235243
I0218 00:12:54.903625 18287 solver.cpp:204] Train net output #0: loss = 0.235232 (* 1 = 0.235232 loss)
I0218 00:12:54.903641 18287 solver.cpp:865] Iteration 6800, lr = 0.0067767
I0218 00:12:57.435087 18287 solver.cpp:189] Iteration 6900, loss = 0.22399
I0218 00:12:57.435148 18287 solver.cpp:204] Train net output #0: loss = 0.223979 (* 1 = 0.223979 loss)
I0218 00:12:57.435165 18287 solver.cpp:865] Iteration 6900, lr = 0.0067466
I0218 00:12:59.939071 18287 solver.cpp:267] Iteration 7000, Testing net (#0)
I0218 00:13:01.726227 18287 solver.cpp:318] Test net output #0: accuracy = 0.9768
I0218 00:13:01.726297 18287 solver.cpp:318] Test net output #1: loss = 0.101425 (* 1 = 0.101425 loss)
I0218 00:13:01.740597 18287 solver.cpp:189] Iteration 7000, loss = 0.0476399
I0218 00:13:01.740664 18287 solver.cpp:204] Train net output #0: loss = 0.0476288 (* 1 = 0.0476288 loss)
I0218 00:13:01.740684 18287 solver.cpp:865] Iteration 7000, lr = 0.00671681
I0218 00:13:04.275003 18287 solver.cpp:189] Iteration 7100, loss = 0.101275
I0218 00:13:04.275074 18287 solver.cpp:204] Train net output #0: loss = 0.101264 (* 1 = 0.101264 loss)
I0218 00:13:04.275094 18287 solver.cpp:865] Iteration 7100, lr = 0.00668733
I0218 00:13:06.805541 18287 solver.cpp:189] Iteration 7200, loss = 0.00192063
I0218 00:13:06.805613 18287 solver.cpp:204] Train net output #0: loss = 0.0019084 (* 1 = 0.0019084 loss)
I0218 00:13:06.805630 18287 solver.cpp:865] Iteration 7200, lr = 0.00665815
I0218 00:13:09.335770 18287 solver.cpp:189] Iteration 7300, loss = 0.309989
I0218 00:13:09.335842 18287 solver.cpp:204] Train net output #0: loss = 0.309977 (* 1 = 0.309977 loss)
I0218 00:13:09.335860 18287 solver.cpp:865] Iteration 7300, lr = 0.00662927
I0218 00:13:11.864022 18287 solver.cpp:189] Iteration 7400, loss = 0.212198
I0218 00:13:11.864131 18287 solver.cpp:204] Train net output #0: loss = 0.212185 (* 1 = 0.212185 loss)
I0218 00:13:11.864150 18287 solver.cpp:865] Iteration 7400, lr = 0.00660067
I0218 00:13:14.372287 18287 solver.cpp:267] Iteration 7500, Testing net (#0)
I0218 00:13:16.161239 18287 solver.cpp:318] Test net output #0: accuracy = 0.9772
I0218 00:13:16.161509 18287 solver.cpp:318] Test net output #1: loss = 0.0975503 (* 1 = 0.0975503 loss)
I0218 00:13:16.175302 18287 solver.cpp:189] Iteration 7500, loss = 0.0842702
I0218 00:13:16.175359 18287 solver.cpp:204] Train net output #0: loss = 0.0842581 (* 1 = 0.0842581 loss)
I0218 00:13:16.175379 18287 solver.cpp:865] Iteration 7500, lr = 0.00657236
I0218 00:13:18.700619 18287 solver.cpp:189] Iteration 7600, loss = 0.0958112
I0218 00:13:18.700685 18287 solver.cpp:204] Train net output #0: loss = 0.0957991 (* 1 = 0.0957991 loss)
I0218 00:13:18.700701 18287 solver.cpp:865] Iteration 7600, lr = 0.00654433
I0218 00:13:21.230643 18287 solver.cpp:189] Iteration 7700, loss = 1.73653
I0218 00:13:21.230715 18287 solver.cpp:204] Train net output #0: loss = 1.73652 (* 1 = 1.73652 loss)
I0218 00:13:21.230734 18287 solver.cpp:865] Iteration 7700, lr = 0.00651658
I0218 00:13:23.758994 18287 solver.cpp:189] Iteration 7800, loss = 0.0671202
I0218 00:13:23.759073 18287 solver.cpp:204] Train net output #0: loss = 0.0671087 (* 1 = 0.0671087 loss)
I0218 00:13:23.759093 18287 solver.cpp:865] Iteration 7800, lr = 0.00648911
I0218 00:13:26.287282 18287 solver.cpp:189] Iteration 7900, loss = 0.0755292
I0218 00:13:26.287371 18287 solver.cpp:204] Train net output #0: loss = 0.0755177 (* 1 = 0.0755177 loss)
I0218 00:13:26.287394 18287 solver.cpp:865] Iteration 7900, lr = 0.0064619
I0218 00:13:28.793109 18287 solver.cpp:267] Iteration 8000, Testing net (#0)
I0218 00:13:30.583222 18287 solver.cpp:318] Test net output #0: accuracy = 0.9781
I0218 00:13:30.583308 18287 solver.cpp:318] Test net output #1: loss = 0.137231 (* 1 = 0.137231 loss)
I0218 00:13:30.597851 18287 solver.cpp:189] Iteration 8000, loss = 0.0778407
I0218 00:13:30.597913 18287 solver.cpp:204] Train net output #0: loss = 0.0778276 (* 1 = 0.0778276 loss)
I0218 00:13:30.597931 18287 solver.cpp:865] Iteration 8000, lr = 0.00643496
I0218 00:13:33.131682 18287 solver.cpp:189] Iteration 8100, loss = 0.105036
I0218 00:13:33.131772 18287 solver.cpp:204] Train net output #0: loss = 0.105023 (* 1 = 0.105023 loss)
I0218 00:13:33.131794 18287 solver.cpp:865] Iteration 8100, lr = 0.00640827
I0218 00:13:35.658953 18287 solver.cpp:189] Iteration 8200, loss = 0.0265381
I0218 00:13:35.659024 18287 solver.cpp:204] Train net output #0: loss = 0.026525 (* 1 = 0.026525 loss)
I0218 00:13:35.659044 18287 solver.cpp:865] Iteration 8200, lr = 0.00638185
I0218 00:13:38.187402 18287 solver.cpp:189] Iteration 8300, loss = 0.214433
I0218 00:13:38.187474 18287 solver.cpp:204] Train net output #0: loss = 0.21442 (* 1 = 0.21442 loss)
I0218 00:13:38.187494 18287 solver.cpp:865] Iteration 8300, lr = 0.00635567
I0218 00:13:40.719574 18287 solver.cpp:189] Iteration 8400, loss = 0.228905
I0218 00:13:40.719658 18287 solver.cpp:204] Train net output #0: loss = 0.228892 (* 1 = 0.228892 loss)
I0218 00:13:40.719682 18287 solver.cpp:865] Iteration 8400, lr = 0.00632975
I0218 00:13:43.227550 18287 solver.cpp:267] Iteration 8500, Testing net (#0)
I0218 00:13:45.017405 18287 solver.cpp:318] Test net output #0: accuracy = 0.9697
I0218 00:13:45.017480 18287 solver.cpp:318] Test net output #1: loss = 0.213984 (* 1 = 0.213984 loss)
I0218 00:13:45.031404 18287 solver.cpp:189] Iteration 8500, loss = 0.0131855
I0218 00:13:45.031471 18287 solver.cpp:204] Train net output #0: loss = 0.0131746 (* 1 = 0.0131746 loss)
I0218 00:13:45.031492 18287 solver.cpp:865] Iteration 8500, lr = 0.00630407
I0218 00:13:47.564012 18287 solver.cpp:189] Iteration 8600, loss = 0.000582635
I0218 00:13:47.564391 18287 solver.cpp:204] Train net output #0: loss = 0.000571684 (* 1 = 0.000571684 loss)
I0218 00:13:47.564417 18287 solver.cpp:865] Iteration 8600, lr = 0.00627864
I0218 00:13:50.092005 18287 solver.cpp:189] Iteration 8700, loss = 0.0194355
I0218 00:13:50.092087 18287 solver.cpp:204] Train net output #0: loss = 0.0194245 (* 1 = 0.0194245 loss)
I0218 00:13:50.092109 18287 solver.cpp:865] Iteration 8700, lr = 0.00625344
I0218 00:13:52.622400 18287 solver.cpp:189] Iteration 8800, loss = 0.0246709
I0218 00:13:52.622472 18287 solver.cpp:204] Train net output #0: loss = 0.0246599 (* 1 = 0.0246599 loss)
I0218 00:13:52.622488 18287 solver.cpp:865] Iteration 8800, lr = 0.00622847
I0218 00:13:55.152325 18287 solver.cpp:189] Iteration 8900, loss = 0.00211759
I0218 00:13:55.152396 18287 solver.cpp:204] Train net output #0: loss = 0.00210659 (* 1 = 0.00210659 loss)
I0218 00:13:55.152413 18287 solver.cpp:865] Iteration 8900, lr = 0.00620374
I0218 00:13:57.659425 18287 solver.cpp:267] Iteration 9000, Testing net (#0)
I0218 00:13:59.451521 18287 solver.cpp:318] Test net output #0: accuracy = 0.9778
I0218 00:13:59.451597 18287 solver.cpp:318] Test net output #1: loss = 0.10234 (* 1 = 0.10234 loss)
I0218 00:13:59.467736 18287 solver.cpp:189] Iteration 9000, loss = 0.0344848
I0218 00:13:59.467818 18287 solver.cpp:204] Train net output #0: loss = 0.0344727 (* 1 = 0.0344727 loss)
I0218 00:13:59.467864 18287 solver.cpp:865] Iteration 9000, lr = 0.00617924
I0218 00:14:01.999518 18287 solver.cpp:189] Iteration 9100, loss = 0.300251
I0218 00:14:01.999590 18287 solver.cpp:204] Train net output #0: loss = 0.300239 (* 1 = 0.300239 loss)
I0218 00:14:01.999608 18287 solver.cpp:865] Iteration 9100, lr = 0.00615496
I0218 00:14:04.532721 18287 solver.cpp:189] Iteration 9200, loss = 0.0858845
I0218 00:14:04.532888 18287 solver.cpp:204] Train net output #0: loss = 0.0858725 (* 1 = 0.0858725 loss)
I0218 00:14:04.532928 18287 solver.cpp:865] Iteration 9200, lr = 0.0061309
I0218 00:14:07.060529 18287 solver.cpp:189] Iteration 9300, loss = 0.000922419
I0218 00:14:07.060611 18287 solver.cpp:204] Train net output #0: loss = 0.000910394 (* 1 = 0.000910394 loss)
I0218 00:14:07.060626 18287 solver.cpp:865] Iteration 9300, lr = 0.00610706
I0218 00:14:09.595027 18287 solver.cpp:189] Iteration 9400, loss = 0.180992
I0218 00:14:09.595135 18287 solver.cpp:204] Train net output #0: loss = 0.18098 (* 1 = 0.18098 loss)
I0218 00:14:09.595172 18287 solver.cpp:865] Iteration 9400, lr = 0.00608343
I0218 00:14:12.097975 18287 solver.cpp:267] Iteration 9500, Testing net (#0)
I0218 00:14:13.888288 18287 solver.cpp:318] Test net output #0: accuracy = 0.9749
I0218 00:14:13.888373 18287 solver.cpp:318] Test net output #1: loss = 0.104624 (* 1 = 0.104624 loss)
I0218 00:14:13.902729 18287 solver.cpp:189] Iteration 9500, loss = 0.00767925
I0218 00:14:13.902798 18287 solver.cpp:204] Train net output #0: loss = 0.00766708 (* 1 = 0.00766708 loss)
I0218 00:14:13.902830 18287 solver.cpp:865] Iteration 9500, lr = 0.00606002
I0218 00:14:16.433223 18287 solver.cpp:189] Iteration 9600, loss = 0.0161766
I0218 00:14:16.433298 18287 solver.cpp:204] Train net output #0: loss = 0.0161644 (* 1 = 0.0161644 loss)
I0218 00:14:16.433316 18287 solver.cpp:865] Iteration 9600, lr = 0.00603682
I0218 00:14:18.965924 18287 solver.cpp:189] Iteration 9700, loss = 0.13575
I0218 00:14:18.971223 18287 solver.cpp:204] Train net output #0: loss = 0.135738 (* 1 = 0.135738 loss)
I0218 00:14:18.971248 18287 solver.cpp:865] Iteration 9700, lr = 0.00601382
I0218 00:14:21.492367 18287 solver.cpp:189] Iteration 9800, loss = 0.109176
I0218 00:14:21.492485 18287 solver.cpp:204] Train net output #0: loss = 0.109164 (* 1 = 0.109164 loss)
I0218 00:14:21.492506 18287 solver.cpp:865] Iteration 9800, lr = 0.00599102
I0218 00:14:24.023891 18287 solver.cpp:189] Iteration 9900, loss = 0.0852015
I0218 00:14:24.023977 18287 solver.cpp:204] Train net output #0: loss = 0.0851893 (* 1 = 0.0851893 loss)
I0218 00:14:24.023998 18287 solver.cpp:865] Iteration 9900, lr = 0.00596843
I0218 00:14:26.544922 18287 solver.cpp:338] Snapshotting to examples/mnist/lenet_rmsprop_iter_10000.caffemodel
I0218 00:14:26.551739 18287 solver.cpp:346] Snapshotting solver state to examples/mnist/lenet_rmsprop_iter_10000.solverstate
I0218 00:14:26.567471 18287 solver.cpp:249] Iteration 10000, loss = 0.293119
I0218 00:14:26.567528 18287 solver.cpp:267] Iteration 10000, Testing net (#0)
I0218 00:14:28.350311 18287 solver.cpp:318] Test net output #0: accuracy = 0.9782
I0218 00:14:28.350378 18287 solver.cpp:318] Test net output #1: loss = 0.106988 (* 1 = 0.106988 loss)
I0218 00:14:28.350394 18287 solver.cpp:254] Optimization Done.
I0218 00:14:28.350404 18287 caffe.cpp:121] Optimization Done.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment