Last active: May 11, 2017 18:45
Save fxrcode/37d25b9e04007011d0644c1f09df571c to your computer and use it in GitHub Desktop.
May11_Caffe_Nvidia-Docker_1st_MNIST
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
139 p1@p1-xos:~⟫ sudo nvidia-docker run -ti bvlc/caffe:gpu /bin/bash | |
root@0a1c1b0432ff:/workspace# pwd | |
/workspace | |
root@0a1c1b0432ff:/workspace# lsb_release | |
bash: lsb_release: command not found | |
root@0a1c1b0432ff:/workspace# uname -a | |
Linux 0a1c1b0432ff 4.8.0-51-generic #54~16.04.1-Ubuntu SMP Wed Apr 26 16:00:28 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux | |
root@0a1c1b0432ff:/workspace# nvidia- | |
nvidia-cuda-mps-control nvidia-debugdump nvidia-smi | |
nvidia-cuda-mps-server nvidia-persistenced | |
root@0a1c1b0432ff:/workspace# nvidia-smi |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
root@0a1c1b0432ff:/opt/caffe# ./build/tools/caffe test -model=examples/mnist/lenet_train_test.prototxt -weights=examples/mnist/lenet_iter_10000.caffemodel | |
I0511 18:43:46.670570 534 caffe.cpp:284] Use CPU. | |
I0511 18:43:46.936956 534 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist | |
I0511 18:43:46.937157 534 net.cpp:51] Initializing net from parameters: | |
name: "LeNet" | |
state { | |
phase: TEST | |
level: 0 | |
stage: "" | |
} | |
layer { | |
name: "mnist" | |
type: "Data" | |
top: "data" | |
top: "label" | |
include { | |
phase: TEST | |
} | |
transform_param { | |
scale: 0.00390625 | |
} | |
data_param { | |
source: "examples/mnist/mnist_test_lmdb" | |
batch_size: 100 | |
backend: LMDB | |
} | |
} | |
layer { | |
name: "conv1" | |
type: "Convolution" | |
bottom: "data" | |
top: "conv1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 20 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool1" | |
type: "Pooling" | |
bottom: "conv1" | |
top: "pool1" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "conv2" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "conv2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 50 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool2" | |
type: "Pooling" | |
bottom: "conv2" | |
top: "pool2" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "ip1" | |
type: "InnerProduct" | |
bottom: "pool2" | |
top: "ip1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 500 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "relu1" | |
type: "ReLU" | |
bottom: "ip1" | |
top: "ip1" | |
} | |
layer { | |
name: "ip2" | |
type: "InnerProduct" | |
bottom: "ip1" | |
top: "ip2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 10 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "accuracy" | |
type: "Accuracy" | |
bottom: "ip2" | |
bottom: "label" | |
top: "accuracy" | |
include { | |
phase: TEST | |
} | |
} | |
layer { | |
name: "loss" | |
type: "SoftmaxWithLoss" | |
bottom: "ip2" | |
bottom: "label" | |
top: "loss" | |
} | |
I0511 18:43:46.937371 534 layer_factory.hpp:77] Creating layer mnist | |
I0511 18:43:46.937526 534 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb | |
I0511 18:43:46.937548 534 net.cpp:84] Creating Layer mnist | |
I0511 18:43:46.937556 534 net.cpp:380] mnist -> data | |
I0511 18:43:46.937592 534 net.cpp:380] mnist -> label | |
I0511 18:43:46.937618 534 data_layer.cpp:45] output data size: 100,1,28,28 | |
I0511 18:43:46.938709 534 net.cpp:122] Setting up mnist | |
I0511 18:43:46.938736 534 net.cpp:129] Top shape: 100 1 28 28 (78400) | |
I0511 18:43:46.938741 534 net.cpp:129] Top shape: 100 (100) | |
I0511 18:43:46.938745 534 net.cpp:137] Memory required for data: 314000 | |
I0511 18:43:46.938751 534 layer_factory.hpp:77] Creating layer label_mnist_1_split | |
I0511 18:43:46.938778 534 net.cpp:84] Creating Layer label_mnist_1_split | |
I0511 18:43:46.938784 534 net.cpp:406] label_mnist_1_split <- label | |
I0511 18:43:46.938794 534 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_0 | |
I0511 18:43:46.938802 534 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_1 | |
I0511 18:43:46.938809 534 net.cpp:122] Setting up label_mnist_1_split | |
I0511 18:43:46.938813 534 net.cpp:129] Top shape: 100 (100) | |
I0511 18:43:46.938817 534 net.cpp:129] Top shape: 100 (100) | |
I0511 18:43:46.938822 534 net.cpp:137] Memory required for data: 314800 | |
I0511 18:43:46.938824 534 layer_factory.hpp:77] Creating layer conv1 | |
I0511 18:43:46.938837 534 net.cpp:84] Creating Layer conv1 | |
I0511 18:43:46.938840 534 net.cpp:406] conv1 <- data | |
I0511 18:43:46.938846 534 net.cpp:380] conv1 -> conv1 | |
I0511 18:43:47.354722 534 net.cpp:122] Setting up conv1 | |
I0511 18:43:47.354750 534 net.cpp:129] Top shape: 100 20 24 24 (1152000) | |
I0511 18:43:47.354753 534 net.cpp:137] Memory required for data: 4922800 | |
I0511 18:43:47.354769 534 layer_factory.hpp:77] Creating layer pool1 | |
I0511 18:43:47.354854 534 net.cpp:84] Creating Layer pool1 | |
I0511 18:43:47.354873 534 net.cpp:406] pool1 <- conv1 | |
I0511 18:43:47.354894 534 net.cpp:380] pool1 -> pool1 | |
I0511 18:43:47.354955 534 net.cpp:122] Setting up pool1 | |
I0511 18:43:47.354961 534 net.cpp:129] Top shape: 100 20 12 12 (288000) | |
I0511 18:43:47.354965 534 net.cpp:137] Memory required for data: 6074800 | |
I0511 18:43:47.354971 534 layer_factory.hpp:77] Creating layer conv2 | |
I0511 18:43:47.354986 534 net.cpp:84] Creating Layer conv2 | |
I0511 18:43:47.354990 534 net.cpp:406] conv2 <- pool1 | |
I0511 18:43:47.354995 534 net.cpp:380] conv2 -> conv2 | |
I0511 18:43:47.356437 534 net.cpp:122] Setting up conv2 | |
I0511 18:43:47.356446 534 net.cpp:129] Top shape: 100 50 8 8 (320000) | |
I0511 18:43:47.356451 534 net.cpp:137] Memory required for data: 7354800 | |
I0511 18:43:47.356472 534 layer_factory.hpp:77] Creating layer pool2 | |
I0511 18:43:47.356479 534 net.cpp:84] Creating Layer pool2 | |
I0511 18:43:47.356483 534 net.cpp:406] pool2 <- conv2 | |
I0511 18:43:47.356489 534 net.cpp:380] pool2 -> pool2 | |
I0511 18:43:47.356498 534 net.cpp:122] Setting up pool2 | |
I0511 18:43:47.356503 534 net.cpp:129] Top shape: 100 50 4 4 (80000) | |
I0511 18:43:47.356519 534 net.cpp:137] Memory required for data: 7674800 | |
I0511 18:43:47.356523 534 layer_factory.hpp:77] Creating layer ip1 | |
I0511 18:43:47.356529 534 net.cpp:84] Creating Layer ip1 | |
I0511 18:43:47.356533 534 net.cpp:406] ip1 <- pool2 | |
I0511 18:43:47.356536 534 net.cpp:380] ip1 -> ip1 | |
I0511 18:43:47.358913 534 net.cpp:122] Setting up ip1 | |
I0511 18:43:47.358921 534 net.cpp:129] Top shape: 100 500 (50000) | |
I0511 18:43:47.358923 534 net.cpp:137] Memory required for data: 7874800 | |
I0511 18:43:47.358932 534 layer_factory.hpp:77] Creating layer relu1 | |
I0511 18:43:47.358950 534 net.cpp:84] Creating Layer relu1 | |
I0511 18:43:47.358953 534 net.cpp:406] relu1 <- ip1 | |
I0511 18:43:47.358959 534 net.cpp:367] relu1 -> ip1 (in-place) | |
I0511 18:43:47.359544 534 net.cpp:122] Setting up relu1 | |
I0511 18:43:47.359552 534 net.cpp:129] Top shape: 100 500 (50000) | |
I0511 18:43:47.359556 534 net.cpp:137] Memory required for data: 8074800 | |
I0511 18:43:47.359560 534 layer_factory.hpp:77] Creating layer ip2 | |
I0511 18:43:47.359568 534 net.cpp:84] Creating Layer ip2 | |
I0511 18:43:47.359572 534 net.cpp:406] ip2 <- ip1 | |
I0511 18:43:47.359578 534 net.cpp:380] ip2 -> ip2 | |
I0511 18:43:47.359614 534 net.cpp:122] Setting up ip2 | |
I0511 18:43:47.359618 534 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:43:47.359622 534 net.cpp:137] Memory required for data: 8078800 | |
I0511 18:43:47.359627 534 layer_factory.hpp:77] Creating layer ip2_ip2_0_split | |
I0511 18:43:47.359632 534 net.cpp:84] Creating Layer ip2_ip2_0_split | |
I0511 18:43:47.359637 534 net.cpp:406] ip2_ip2_0_split <- ip2 | |
I0511 18:43:47.359640 534 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_0 | |
I0511 18:43:47.359647 534 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_1 | |
I0511 18:43:47.359652 534 net.cpp:122] Setting up ip2_ip2_0_split | |
I0511 18:43:47.359657 534 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:43:47.359660 534 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:43:47.359663 534 net.cpp:137] Memory required for data: 8086800 | |
I0511 18:43:47.359666 534 layer_factory.hpp:77] Creating layer accuracy | |
I0511 18:43:47.359671 534 net.cpp:84] Creating Layer accuracy | |
I0511 18:43:47.359675 534 net.cpp:406] accuracy <- ip2_ip2_0_split_0 | |
I0511 18:43:47.359678 534 net.cpp:406] accuracy <- label_mnist_1_split_0 | |
I0511 18:43:47.359683 534 net.cpp:380] accuracy -> accuracy | |
I0511 18:43:47.359690 534 net.cpp:122] Setting up accuracy | |
I0511 18:43:47.359694 534 net.cpp:129] Top shape: (1) | |
I0511 18:43:47.359697 534 net.cpp:137] Memory required for data: 8086804 | |
I0511 18:43:47.359700 534 layer_factory.hpp:77] Creating layer loss | |
I0511 18:43:47.359704 534 net.cpp:84] Creating Layer loss | |
I0511 18:43:47.359707 534 net.cpp:406] loss <- ip2_ip2_0_split_1 | |
I0511 18:43:47.359710 534 net.cpp:406] loss <- label_mnist_1_split_1 | |
I0511 18:43:47.359714 534 net.cpp:380] loss -> loss | |
I0511 18:43:47.359737 534 layer_factory.hpp:77] Creating layer loss | |
I0511 18:43:47.359885 534 net.cpp:122] Setting up loss | |
I0511 18:43:47.359891 534 net.cpp:129] Top shape: (1) | |
I0511 18:43:47.359895 534 net.cpp:132] with loss weight 1 | |
I0511 18:43:47.359917 534 net.cpp:137] Memory required for data: 8086808 | |
I0511 18:43:47.359921 534 net.cpp:198] loss needs backward computation. | |
I0511 18:43:47.359927 534 net.cpp:200] accuracy does not need backward computation. | |
I0511 18:43:47.359932 534 net.cpp:198] ip2_ip2_0_split needs backward computation. | |
I0511 18:43:47.359936 534 net.cpp:198] ip2 needs backward computation. | |
I0511 18:43:47.359939 534 net.cpp:198] relu1 needs backward computation. | |
I0511 18:43:47.359943 534 net.cpp:198] ip1 needs backward computation. | |
I0511 18:43:47.359946 534 net.cpp:198] pool2 needs backward computation. | |
I0511 18:43:47.359951 534 net.cpp:198] conv2 needs backward computation. | |
I0511 18:43:47.359953 534 net.cpp:198] pool1 needs backward computation. | |
I0511 18:43:47.359957 534 net.cpp:198] conv1 needs backward computation. | |
I0511 18:43:47.359962 534 net.cpp:200] label_mnist_1_split does not need backward computation. | |
I0511 18:43:47.359966 534 net.cpp:200] mnist does not need backward computation. | |
I0511 18:43:47.359971 534 net.cpp:242] This network produces output accuracy | |
I0511 18:43:47.359973 534 net.cpp:242] This network produces output loss | |
I0511 18:43:47.359982 534 net.cpp:255] Network initialization done. | |
I0511 18:43:47.361605 534 caffe.cpp:290] Running for 50 iterations. | |
I0511 18:43:47.457063 534 caffe.cpp:313] Batch 0, accuracy = 0.98 | |
I0511 18:43:47.457088 534 caffe.cpp:313] Batch 0, loss = 0.0183153 | |
I0511 18:43:47.503298 534 caffe.cpp:313] Batch 1, accuracy = 1 | |
I0511 18:43:47.503320 534 caffe.cpp:313] Batch 1, loss = 0.0100282 | |
I0511 18:43:47.549146 534 caffe.cpp:313] Batch 2, accuracy = 0.99 | |
I0511 18:43:47.549167 534 caffe.cpp:313] Batch 2, loss = 0.0349494 | |
I0511 18:43:47.594298 534 caffe.cpp:313] Batch 3, accuracy = 0.99 | |
I0511 18:43:47.594318 534 caffe.cpp:313] Batch 3, loss = 0.0195375 | |
I0511 18:43:47.639641 534 caffe.cpp:313] Batch 4, accuracy = 0.98 | |
I0511 18:43:47.639662 534 caffe.cpp:313] Batch 4, loss = 0.0445807 | |
I0511 18:43:47.685220 534 caffe.cpp:313] Batch 5, accuracy = 0.99 | |
I0511 18:43:47.685240 534 caffe.cpp:313] Batch 5, loss = 0.0397243 | |
I0511 18:43:47.730939 534 caffe.cpp:313] Batch 6, accuracy = 0.98 | |
I0511 18:43:47.730958 534 caffe.cpp:313] Batch 6, loss = 0.0736036 | |
I0511 18:43:47.776216 534 caffe.cpp:313] Batch 7, accuracy = 0.99 | |
I0511 18:43:47.776237 534 caffe.cpp:313] Batch 7, loss = 0.0487895 | |
I0511 18:43:47.821794 534 caffe.cpp:313] Batch 8, accuracy = 1 | |
I0511 18:43:47.821813 534 caffe.cpp:313] Batch 8, loss = 0.00737563 | |
I0511 18:43:47.867640 534 caffe.cpp:313] Batch 9, accuracy = 0.98 | |
I0511 18:43:47.867660 534 caffe.cpp:313] Batch 9, loss = 0.0331768 | |
I0511 18:43:47.913029 534 caffe.cpp:313] Batch 10, accuracy = 0.98 | |
I0511 18:43:47.913048 534 caffe.cpp:313] Batch 10, loss = 0.0608895 | |
I0511 18:43:47.958816 534 caffe.cpp:313] Batch 11, accuracy = 0.98 | |
I0511 18:43:47.958837 534 caffe.cpp:313] Batch 11, loss = 0.0378952 | |
I0511 18:43:48.004338 534 caffe.cpp:313] Batch 12, accuracy = 0.95 | |
I0511 18:43:48.004355 534 caffe.cpp:313] Batch 12, loss = 0.168317 | |
I0511 18:43:48.050546 534 caffe.cpp:313] Batch 13, accuracy = 0.98 | |
I0511 18:43:48.050567 534 caffe.cpp:313] Batch 13, loss = 0.0382882 | |
I0511 18:43:48.096148 534 caffe.cpp:313] Batch 14, accuracy = 1 | |
I0511 18:43:48.096168 534 caffe.cpp:313] Batch 14, loss = 0.0089751 | |
I0511 18:43:48.142673 534 caffe.cpp:313] Batch 15, accuracy = 0.99 | |
I0511 18:43:48.142694 534 caffe.cpp:313] Batch 15, loss = 0.0316318 | |
I0511 18:43:48.188145 534 caffe.cpp:313] Batch 16, accuracy = 1 | |
I0511 18:43:48.188166 534 caffe.cpp:313] Batch 16, loss = 0.0158275 | |
I0511 18:43:48.233964 534 caffe.cpp:313] Batch 17, accuracy = 0.99 | |
I0511 18:43:48.233986 534 caffe.cpp:313] Batch 17, loss = 0.0329577 | |
I0511 18:43:48.279325 534 caffe.cpp:313] Batch 18, accuracy = 1 | |
I0511 18:43:48.279369 534 caffe.cpp:313] Batch 18, loss = 0.00947101 | |
I0511 18:43:48.325031 534 caffe.cpp:313] Batch 19, accuracy = 0.99 | |
I0511 18:43:48.325052 534 caffe.cpp:313] Batch 19, loss = 0.0584296 | |
I0511 18:43:48.370820 534 caffe.cpp:313] Batch 20, accuracy = 0.98 | |
I0511 18:43:48.370842 534 caffe.cpp:313] Batch 20, loss = 0.0893826 | |
I0511 18:43:48.416714 534 caffe.cpp:313] Batch 21, accuracy = 0.97 | |
I0511 18:43:48.416733 534 caffe.cpp:313] Batch 21, loss = 0.0563625 | |
I0511 18:43:48.463677 534 caffe.cpp:313] Batch 22, accuracy = 0.99 | |
I0511 18:43:48.463696 534 caffe.cpp:313] Batch 22, loss = 0.0275421 | |
I0511 18:43:48.509598 534 caffe.cpp:313] Batch 23, accuracy = 1 | |
I0511 18:43:48.509618 534 caffe.cpp:313] Batch 23, loss = 0.0178095 | |
I0511 18:43:48.555642 534 caffe.cpp:313] Batch 24, accuracy = 0.99 | |
I0511 18:43:48.555661 534 caffe.cpp:313] Batch 24, loss = 0.0440083 | |
I0511 18:43:48.601516 534 caffe.cpp:313] Batch 25, accuracy = 0.99 | |
I0511 18:43:48.601536 534 caffe.cpp:313] Batch 25, loss = 0.08853 | |
I0511 18:43:48.647208 534 caffe.cpp:313] Batch 26, accuracy = 0.98 | |
I0511 18:43:48.647228 534 caffe.cpp:313] Batch 26, loss = 0.104539 | |
I0511 18:43:48.692878 534 caffe.cpp:313] Batch 27, accuracy = 0.99 | |
I0511 18:43:48.692899 534 caffe.cpp:313] Batch 27, loss = 0.0166222 | |
I0511 18:43:48.738636 534 caffe.cpp:313] Batch 28, accuracy = 0.98 | |
I0511 18:43:48.738657 534 caffe.cpp:313] Batch 28, loss = 0.0579117 | |
I0511 18:43:48.783928 534 caffe.cpp:313] Batch 29, accuracy = 0.96 | |
I0511 18:43:48.783947 534 caffe.cpp:313] Batch 29, loss = 0.117745 | |
I0511 18:43:48.829802 534 caffe.cpp:313] Batch 30, accuracy = 1 | |
I0511 18:43:48.829823 534 caffe.cpp:313] Batch 30, loss = 0.0201589 | |
I0511 18:43:48.875820 534 caffe.cpp:313] Batch 31, accuracy = 1 | |
I0511 18:43:48.875840 534 caffe.cpp:313] Batch 31, loss = 0.00375302 | |
I0511 18:43:48.921403 534 caffe.cpp:313] Batch 32, accuracy = 1 | |
I0511 18:43:48.921422 534 caffe.cpp:313] Batch 32, loss = 0.00625853 | |
I0511 18:43:48.967042 534 caffe.cpp:313] Batch 33, accuracy = 1 | |
I0511 18:43:48.967061 534 caffe.cpp:313] Batch 33, loss = 0.00372678 | |
I0511 18:43:49.015280 534 caffe.cpp:313] Batch 34, accuracy = 0.99 | |
I0511 18:43:49.015301 534 caffe.cpp:313] Batch 34, loss = 0.0505239 | |
I0511 18:43:49.065428 534 caffe.cpp:313] Batch 35, accuracy = 0.97 | |
I0511 18:43:49.065460 534 caffe.cpp:313] Batch 35, loss = 0.106434 | |
I0511 18:43:49.115597 534 caffe.cpp:313] Batch 36, accuracy = 1 | |
I0511 18:43:49.115624 534 caffe.cpp:313] Batch 36, loss = 0.00438205 | |
I0511 18:43:49.165475 534 caffe.cpp:313] Batch 37, accuracy = 0.98 | |
I0511 18:43:49.165503 534 caffe.cpp:313] Batch 37, loss = 0.0538784 | |
I0511 18:43:49.214176 534 caffe.cpp:313] Batch 38, accuracy = 1 | |
I0511 18:43:49.214228 534 caffe.cpp:313] Batch 38, loss = 0.0154643 | |
I0511 18:43:49.261406 534 caffe.cpp:313] Batch 39, accuracy = 0.98 | |
I0511 18:43:49.261427 534 caffe.cpp:313] Batch 39, loss = 0.0390331 | |
I0511 18:43:49.307471 534 caffe.cpp:313] Batch 40, accuracy = 0.98 | |
I0511 18:43:49.307490 534 caffe.cpp:313] Batch 40, loss = 0.0450034 | |
I0511 18:43:49.353212 534 caffe.cpp:313] Batch 41, accuracy = 0.98 | |
I0511 18:43:49.353232 534 caffe.cpp:313] Batch 41, loss = 0.0778003 | |
I0511 18:43:49.399161 534 caffe.cpp:313] Batch 42, accuracy = 0.99 | |
I0511 18:43:49.399180 534 caffe.cpp:313] Batch 42, loss = 0.0391202 | |
I0511 18:43:49.445185 534 caffe.cpp:313] Batch 43, accuracy = 1 | |
I0511 18:43:49.445204 534 caffe.cpp:313] Batch 43, loss = 0.0153738 | |
I0511 18:43:49.492451 534 caffe.cpp:313] Batch 44, accuracy = 0.98 | |
I0511 18:43:49.492471 534 caffe.cpp:313] Batch 44, loss = 0.0297577 | |
I0511 18:43:49.538363 534 caffe.cpp:313] Batch 45, accuracy = 0.98 | |
I0511 18:43:49.538383 534 caffe.cpp:313] Batch 45, loss = 0.0279536 | |
I0511 18:43:49.583789 534 caffe.cpp:313] Batch 46, accuracy = 1 | |
I0511 18:43:49.583808 534 caffe.cpp:313] Batch 46, loss = 0.00990225 | |
I0511 18:43:49.629189 534 caffe.cpp:313] Batch 47, accuracy = 0.99 | |
I0511 18:43:49.629209 534 caffe.cpp:313] Batch 47, loss = 0.0110674 | |
I0511 18:43:49.675367 534 caffe.cpp:313] Batch 48, accuracy = 0.98 | |
I0511 18:43:49.675389 534 caffe.cpp:313] Batch 48, loss = 0.0835327 | |
I0511 18:43:49.720705 534 caffe.cpp:313] Batch 49, accuracy = 1 | |
I0511 18:43:49.720723 534 caffe.cpp:313] Batch 49, loss = 0.00869715 | |
I0511 18:43:49.720726 534 caffe.cpp:318] Loss: 0.0413007 | |
I0511 18:43:49.720738 534 caffe.cpp:330] accuracy = 0.9874 | |
I0511 18:43:49.720748 534 caffe.cpp:330] loss = 0.0413007 (* 1 = 0.0413007 loss) | |
root@0a1c1b0432ff:/opt/caffe# |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
root@0a1c1b0432ff:/opt/caffe/data/mnist# ./get_mnist.sh | |
Downloading... | |
--2017-05-11 18:11:40-- http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 9912422 (9.5M) [application/x-gzip] | |
Saving to: 'train-images-idx3-ubyte.gz' | |
train-images-idx3-ubyte.gz 100%[=================================================>] 9.45M 667KB/s in 14s | |
2017-05-11 18:11:54 (700 KB/s) - 'train-images-idx3-ubyte.gz' saved [9912422/9912422] | |
--2017-05-11 18:11:54-- http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Suggested packages: | |
  lsof strace | |
The following NEW packages will be installed: | |
htop | |
0 upgraded, 1 newly installed, 0 to remove and 24 not upgraded. | |
Need to get 76.4 kB of archives. | |
After this operation, 215 kB of additional disk space will be used. | |
Get:1 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 htop amd64 2.0.1-1ubuntu1 [76.4 kB] | |
Fetched 76.4 kB in 0s (128 kB/s) | |
debconf: delaying package configuration, since apt-utils is not installed | |
Selecting previously unselected package htop. | |
(Reading database ... 39335 files and directories currently installed.) | |
Preparing to unpack .../htop_2.0.1-1ubuntu1_amd64.deb ... | |
Unpacking htop (2.0.1-1ubuntu1) ... | |
Processing triggers for mime-support (3.59ubuntu1) ... | |
Setting up htop (2.0.1-1ubuntu1) ... | |
root@0a1c1b0432ff:/opt/caffe# htop | |
root@0a1c1b0432ff:/opt/caffe# vim | |
root@0a1c1b0432ff:/opt/caffe# vi Makefile.config | |
root@0a1c1b0432ff:/opt/caffe# cd data/mnist/ | |
root@0a1c1b0432ff:/opt/caffe/data/mnist# ./get_mnist.sh | |
Downloading... | |
--2017-05-11 18:11:40-- http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 9912422 (9.5M) [application/x-gzip] | |
Saving to: 'train-images-idx3-ubyte.gz' | |
train-images-idx3-ubyte.gz 100%[=================================================>] 9.45M 667KB/s in 14s | |
2017-05-11 18:11:54 (700 KB/s) - 'train-images-idx3-ubyte.gz' saved [9912422/9912422] | |
--2017-05-11 18:11:54-- http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 28881 (28K) [application/x-gzip] | |
Saving to: 'train-labels-idx1-ubyte.gz' | |
train-labels-idx1-ubyte.gz 100%[=================================================>] 28.20K --.-KB/s in 0.07s | |
2017-05-11 18:11:54 (377 KB/s) - 'train-labels-idx1-ubyte.gz' saved [28881/28881] | |
--2017-05-11 18:11:54-- http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 1648877 (1.6M) [application/x-gzip] | |
Saving to: 't10k-images-idx3-ubyte.gz' | |
t10k-images-idx3-ubyte.gz 100%[=================================================>] 1.57M 767KB/s in 2.1s | |
2017-05-11 18:11:57 (767 KB/s) - 't10k-images-idx3-ubyte.gz' saved [1648877/1648877] | |
--2017-05-11 18:11:57-- http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | |
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6 | |
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 4542 (4.4K) [application/x-gzip] | |
Saving to: 't10k-labels-idx1-ubyte.gz' | |
t10k-labels-idx1-ubyte.gz 100%[=================================================>] 4.44K --.-KB/s in 0s | |
2017-05-11 18:11:57 (339 MB/s) - 't10k-labels-idx1-ubyte.gz' saved [4542/4542] | |
root@0a1c1b0432ff:/opt/caffe/data/mnist# cd $CAFFE_ROOT | |
root@0a1c1b0432ff:/opt/caffe# ./examples/mnist/create_mnist.sh | |
Creating lmdb... | |
I0511 18:12:50.087297 511 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_train_lmdb | |
I0511 18:12:50.087496 511 convert_mnist_data.cpp:88] A total of 60000 items. | |
I0511 18:12:50.087502 511 convert_mnist_data.cpp:89] Rows: 28 Cols: 28 | |
I0511 18:12:54.769269 511 convert_mnist_data.cpp:108] Processed 60000 files. | |
I0511 18:12:55.185133 516 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb | |
I0511 18:12:55.185369 516 convert_mnist_data.cpp:88] A total of 10000 items. | |
I0511 18:12:55.185376 516 convert_mnist_data.cpp:89] Rows: 28 Cols: 28 | |
I0511 18:12:55.915678 516 convert_mnist_data.cpp:108] Processed 10000 files. | |
Done. | |
root@0a1c1b0432ff:/opt/caffe# time ./examples/mnist/train_lenet.sh | |
I0511 18:13:14.300827 522 caffe.cpp:218] Using GPUs 0 | |
I0511 18:13:14.330199 522 caffe.cpp:223] GPU 0: GeForce GTX 1080 | |
I0511 18:13:14.614657 522 solver.cpp:44] Initializing solver from parameters: | |
test_iter: 100 | |
test_interval: 500 | |
base_lr: 0.01 | |
display: 100 | |
max_iter: 10000 | |
lr_policy: "inv" | |
gamma: 0.0001 | |
power: 0.75 | |
momentum: 0.9 | |
weight_decay: 0.0005 | |
snapshot: 5000 | |
snapshot_prefix: "examples/mnist/lenet" | |
solver_mode: GPU | |
device_id: 0 | |
net: "examples/mnist/lenet_train_test.prototxt" | |
train_state { | |
level: 0 | |
stage: "" | |
} | |
I0511 18:13:14.614894 522 solver.cpp:87] Creating training net from net file: examples/mnist/lenet_train_test.prototxt | |
I0511 18:13:14.615150 522 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer mnist | |
I0511 18:13:14.615164 522 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy | |
I0511 18:13:14.615250 522 net.cpp:51] Initializing net from parameters: | |
name: "LeNet" | |
state { | |
phase: TRAIN | |
level: 0 | |
stage: "" | |
} | |
layer { | |
name: "mnist" | |
type: "Data" | |
top: "data" | |
top: "label" | |
include { | |
phase: TRAIN | |
} | |
transform_param { | |
scale: 0.00390625 | |
} | |
data_param { | |
source: "examples/mnist/mnist_train_lmdb" | |
batch_size: 64 | |
backend: LMDB | |
} | |
} | |
layer { | |
name: "conv1" | |
type: "Convolution" | |
bottom: "data" | |
top: "conv1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 20 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool1" | |
type: "Pooling" | |
bottom: "conv1" | |
top: "pool1" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "conv2" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "conv2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 50 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool2" | |
type: "Pooling" | |
bottom: "conv2" | |
top: "pool2" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "ip1" | |
type: "InnerProduct" | |
bottom: "pool2" | |
top: "ip1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 500 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "relu1" | |
type: "ReLU" | |
bottom: "ip1" | |
top: "ip1" | |
} | |
layer { | |
name: "ip2" | |
type: "InnerProduct" | |
bottom: "ip1" | |
top: "ip2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 10 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "loss" | |
type: "SoftmaxWithLoss" | |
bottom: "ip2" | |
bottom: "label" | |
top: "loss" | |
} | |
I0511 18:13:14.615370 522 layer_factory.hpp:77] Creating layer mnist | |
I0511 18:13:14.615486 522 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_train_lmdb | |
I0511 18:13:14.615514 522 net.cpp:84] Creating Layer mnist | |
I0511 18:13:14.615521 522 net.cpp:380] mnist -> data | |
I0511 18:13:14.615545 522 net.cpp:380] mnist -> label | |
I0511 18:13:14.616421 522 data_layer.cpp:45] output data size: 64,1,28,28 | |
I0511 18:13:14.618146 522 net.cpp:122] Setting up mnist | |
I0511 18:13:14.618163 522 net.cpp:129] Top shape: 64 1 28 28 (50176) | |
I0511 18:13:14.618168 522 net.cpp:129] Top shape: 64 (64) | |
I0511 18:13:14.618172 522 net.cpp:137] Memory required for data: 200960 | |
I0511 18:13:14.618182 522 layer_factory.hpp:77] Creating layer conv1 | |
I0511 18:13:14.618207 522 net.cpp:84] Creating Layer conv1 | |
I0511 18:13:14.618216 522 net.cpp:406] conv1 <- data | |
I0511 18:13:14.618229 522 net.cpp:380] conv1 -> conv1 | |
I0511 18:13:15.536608 522 net.cpp:122] Setting up conv1 | |
I0511 18:13:15.536633 522 net.cpp:129] Top shape: 64 20 24 24 (737280) | |
I0511 18:13:15.536650 522 net.cpp:137] Memory required for data: 3150080 | |
I0511 18:13:15.536698 522 layer_factory.hpp:77] Creating layer pool1 | |
I0511 18:13:15.536727 522 net.cpp:84] Creating Layer pool1 | |
I0511 18:13:15.536798 522 net.cpp:406] pool1 <- conv1 | |
I0511 18:13:15.536820 522 net.cpp:380] pool1 -> pool1 | |
I0511 18:13:15.536887 522 net.cpp:122] Setting up pool1 | |
I0511 18:13:15.536896 522 net.cpp:129] Top shape: 64 20 12 12 (184320) | |
I0511 18:13:15.536905 522 net.cpp:137] Memory required for data: 3887360 | |
I0511 18:13:15.536908 522 layer_factory.hpp:77] Creating layer conv2 | |
I0511 18:13:15.536921 522 net.cpp:84] Creating Layer conv2 | |
I0511 18:13:15.536923 522 net.cpp:406] conv2 <- pool1 | |
I0511 18:13:15.536929 522 net.cpp:380] conv2 -> conv2 | |
I0511 18:13:15.538609 522 net.cpp:122] Setting up conv2 | |
I0511 18:13:15.538635 522 net.cpp:129] Top shape: 64 50 8 8 (204800) | |
I0511 18:13:15.538640 522 net.cpp:137] Memory required for data: 4706560 | |
I0511 18:13:15.538650 522 layer_factory.hpp:77] Creating layer pool2 | |
I0511 18:13:15.538671 522 net.cpp:84] Creating Layer pool2 | |
I0511 18:13:15.538676 522 net.cpp:406] pool2 <- conv2 | |
I0511 18:13:15.538681 522 net.cpp:380] pool2 -> pool2 | |
I0511 18:13:15.538718 522 net.cpp:122] Setting up pool2 | |
I0511 18:13:15.538724 522 net.cpp:129] Top shape: 64 50 4 4 (51200) | |
I0511 18:13:15.538743 522 net.cpp:137] Memory required for data: 4911360 | |
I0511 18:13:15.538745 522 layer_factory.hpp:77] Creating layer ip1 | |
I0511 18:13:15.538753 522 net.cpp:84] Creating Layer ip1 | |
I0511 18:13:15.538758 522 net.cpp:406] ip1 <- pool2 | |
I0511 18:13:15.538763 522 net.cpp:380] ip1 -> ip1 | |
I0511 18:13:15.541519 522 net.cpp:122] Setting up ip1 | |
I0511 18:13:15.541533 522 net.cpp:129] Top shape: 64 500 (32000) | |
I0511 18:13:15.541537 522 net.cpp:137] Memory required for data: 5039360 | |
I0511 18:13:15.541560 522 layer_factory.hpp:77] Creating layer relu1 | |
I0511 18:13:15.541581 522 net.cpp:84] Creating Layer relu1 | |
I0511 18:13:15.541585 522 net.cpp:406] relu1 <- ip1 | |
I0511 18:13:15.541602 522 net.cpp:367] relu1 -> ip1 (in-place) | |
I0511 18:13:15.542383 522 net.cpp:122] Setting up relu1 | |
I0511 18:13:15.542393 522 net.cpp:129] Top shape: 64 500 (32000) | |
I0511 18:13:15.542397 522 net.cpp:137] Memory required for data: 5167360 | |
I0511 18:13:15.542399 522 layer_factory.hpp:77] Creating layer ip2 | |
I0511 18:13:15.542421 522 net.cpp:84] Creating Layer ip2 | |
I0511 18:13:15.542439 522 net.cpp:406] ip2 <- ip1 | |
I0511 18:13:15.542445 522 net.cpp:380] ip2 -> ip2 | |
I0511 18:13:15.543305 522 net.cpp:122] Setting up ip2 | |
I0511 18:13:15.543316 522 net.cpp:129] Top shape: 64 10 (640) | |
I0511 18:13:15.543318 522 net.cpp:137] Memory required for data: 5169920 | |
I0511 18:13:15.543339 522 layer_factory.hpp:77] Creating layer loss | |
I0511 18:13:15.543360 522 net.cpp:84] Creating Layer loss | |
I0511 18:13:15.543365 522 net.cpp:406] loss <- ip2 | |
I0511 18:13:15.543382 522 net.cpp:406] loss <- label | |
I0511 18:13:15.543391 522 net.cpp:380] loss -> loss | |
I0511 18:13:15.543411 522 layer_factory.hpp:77] Creating layer loss | |
I0511 18:13:15.543712 522 net.cpp:122] Setting up loss | |
I0511 18:13:15.543722 522 net.cpp:129] Top shape: (1) | |
I0511 18:13:15.543725 522 net.cpp:132] with loss weight 1 | |
I0511 18:13:15.543748 522 net.cpp:137] Memory required for data: 5169924 | |
I0511 18:13:15.543751 522 net.cpp:198] loss needs backward computation. | |
I0511 18:13:15.543761 522 net.cpp:198] ip2 needs backward computation. | |
I0511 18:13:15.543766 522 net.cpp:198] relu1 needs backward computation. | |
I0511 18:13:15.543768 522 net.cpp:198] ip1 needs backward computation. | |
I0511 18:13:15.543771 522 net.cpp:198] pool2 needs backward computation. | |
I0511 18:13:15.543774 522 net.cpp:198] conv2 needs backward computation. | |
I0511 18:13:15.543777 522 net.cpp:198] pool1 needs backward computation. | |
I0511 18:13:15.543781 522 net.cpp:198] conv1 needs backward computation. | |
I0511 18:13:15.543783 522 net.cpp:200] mnist does not need backward computation. | |
I0511 18:13:15.543787 522 net.cpp:242] This network produces output loss | |
I0511 18:13:15.543794 522 net.cpp:255] Network initialization done. | |
I0511 18:13:15.543949 522 solver.cpp:172] Creating test net (#0) specified by net file: examples/mnist/lenet_train_test.prototxt | |
I0511 18:13:15.543982 522 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist | |
I0511 18:13:15.544051 522 net.cpp:51] Initializing net from parameters: | |
name: "LeNet" | |
state { | |
phase: TEST | |
} | |
layer { | |
name: "mnist" | |
type: "Data" | |
top: "data" | |
top: "label" | |
include { | |
phase: TEST | |
} | |
transform_param { | |
scale: 0.00390625 | |
} | |
data_param { | |
source: "examples/mnist/mnist_test_lmdb" | |
batch_size: 100 | |
backend: LMDB | |
} | |
} | |
layer { | |
name: "conv1" | |
type: "Convolution" | |
bottom: "data" | |
top: "conv1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 20 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool1" | |
type: "Pooling" | |
bottom: "conv1" | |
top: "pool1" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "conv2" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "conv2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
convolution_param { | |
num_output: 50 | |
kernel_size: 5 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "pool2" | |
type: "Pooling" | |
bottom: "conv2" | |
top: "pool2" | |
pooling_param { | |
pool: MAX | |
kernel_size: 2 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "ip1" | |
type: "InnerProduct" | |
bottom: "pool2" | |
top: "ip1" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 500 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "relu1" | |
type: "ReLU" | |
bottom: "ip1" | |
top: "ip1" | |
} | |
layer { | |
name: "ip2" | |
type: "InnerProduct" | |
bottom: "ip1" | |
top: "ip2" | |
param { | |
lr_mult: 1 | |
} | |
param { | |
lr_mult: 2 | |
} | |
inner_product_param { | |
num_output: 10 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "accuracy" | |
type: "Accuracy" | |
bottom: "ip2" | |
bottom: "label" | |
top: "accuracy" | |
include { | |
phase: TEST | |
} | |
} | |
layer { | |
name: "loss" | |
type: "SoftmaxWithLoss" | |
bottom: "ip2" | |
bottom: "label" | |
top: "loss" | |
} | |
I0511 18:13:15.544185 522 layer_factory.hpp:77] Creating layer mnist | |
I0511 18:13:15.544245 522 db_lmdb.cpp:35] Opened lmdb examples/mnist/mnist_test_lmdb | |
I0511 18:13:15.544258 522 net.cpp:84] Creating Layer mnist | |
I0511 18:13:15.544262 522 net.cpp:380] mnist -> data | |
I0511 18:13:15.544268 522 net.cpp:380] mnist -> label | |
I0511 18:13:15.544350 522 data_layer.cpp:45] output data size: 100,1,28,28 | |
I0511 18:13:15.546480 522 net.cpp:122] Setting up mnist | |
I0511 18:13:15.546519 522 net.cpp:129] Top shape: 100 1 28 28 (78400) | |
I0511 18:13:15.546525 522 net.cpp:129] Top shape: 100 (100) | |
I0511 18:13:15.546527 522 net.cpp:137] Memory required for data: 314000 | |
I0511 18:13:15.546533 522 layer_factory.hpp:77] Creating layer label_mnist_1_split | |
I0511 18:13:15.546561 522 net.cpp:84] Creating Layer label_mnist_1_split | |
I0511 18:13:15.546566 522 net.cpp:406] label_mnist_1_split <- label | |
I0511 18:13:15.546571 522 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_0 | |
I0511 18:13:15.546581 522 net.cpp:380] label_mnist_1_split -> label_mnist_1_split_1 | |
I0511 18:13:15.546643 522 net.cpp:122] Setting up label_mnist_1_split | |
I0511 18:13:15.546649 522 net.cpp:129] Top shape: 100 (100) | |
I0511 18:13:15.546653 522 net.cpp:129] Top shape: 100 (100) | |
I0511 18:13:15.546654 522 net.cpp:137] Memory required for data: 314800 | |
I0511 18:13:15.546658 522 layer_factory.hpp:77] Creating layer conv1 | |
I0511 18:13:15.546670 522 net.cpp:84] Creating Layer conv1 | |
I0511 18:13:15.546674 522 net.cpp:406] conv1 <- data | |
I0511 18:13:15.546679 522 net.cpp:380] conv1 -> conv1 | |
I0511 18:13:15.548213 522 net.cpp:122] Setting up conv1 | |
I0511 18:13:15.548245 522 net.cpp:129] Top shape: 100 20 24 24 (1152000) | |
I0511 18:13:15.548250 522 net.cpp:137] Memory required for data: 4922800 | |
I0511 18:13:15.548276 522 layer_factory.hpp:77] Creating layer pool1 | |
I0511 18:13:15.548315 522 net.cpp:84] Creating Layer pool1 | |
I0511 18:13:15.548318 522 net.cpp:406] pool1 <- conv1 | |
I0511 18:13:15.548324 522 net.cpp:380] pool1 -> pool1 | |
I0511 18:13:15.548362 522 net.cpp:122] Setting up pool1 | |
I0511 18:13:15.548367 522 net.cpp:129] Top shape: 100 20 12 12 (288000) | |
I0511 18:13:15.548372 522 net.cpp:137] Memory required for data: 6074800 | |
I0511 18:13:15.548375 522 layer_factory.hpp:77] Creating layer conv2 | |
I0511 18:13:15.548384 522 net.cpp:84] Creating Layer conv2 | |
I0511 18:13:15.548389 522 net.cpp:406] conv2 <- pool1 | |
I0511 18:13:15.548394 522 net.cpp:380] conv2 -> conv2 | |
I0511 18:13:15.549770 522 net.cpp:122] Setting up conv2 | |
I0511 18:13:15.549818 522 net.cpp:129] Top shape: 100 50 8 8 (320000) | |
I0511 18:13:15.549821 522 net.cpp:137] Memory required for data: 7354800 | |
I0511 18:13:15.549832 522 layer_factory.hpp:77] Creating layer pool2 | |
I0511 18:13:15.549844 522 net.cpp:84] Creating Layer pool2 | |
I0511 18:13:15.549847 522 net.cpp:406] pool2 <- conv2 | |
I0511 18:13:15.549851 522 net.cpp:380] pool2 -> pool2 | |
I0511 18:13:15.549888 522 net.cpp:122] Setting up pool2 | |
I0511 18:13:15.549895 522 net.cpp:129] Top shape: 100 50 4 4 (80000) | |
I0511 18:13:15.549897 522 net.cpp:137] Memory required for data: 7674800 | |
I0511 18:13:15.549901 522 layer_factory.hpp:77] Creating layer ip1 | |
I0511 18:13:15.549908 522 net.cpp:84] Creating Layer ip1 | |
I0511 18:13:15.549911 522 net.cpp:406] ip1 <- pool2 | |
I0511 18:13:15.549916 522 net.cpp:380] ip1 -> ip1 | |
I0511 18:13:15.552865 522 net.cpp:122] Setting up ip1 | |
I0511 18:13:15.552901 522 net.cpp:129] Top shape: 100 500 (50000) | |
I0511 18:13:15.552906 522 net.cpp:137] Memory required for data: 7874800 | |
I0511 18:13:15.552937 522 layer_factory.hpp:77] Creating layer relu1 | |
I0511 18:13:15.552961 522 net.cpp:84] Creating Layer relu1 | |
I0511 18:13:15.552965 522 net.cpp:406] relu1 <- ip1 | |
I0511 18:13:15.552989 522 net.cpp:367] relu1 -> ip1 (in-place) | |
I0511 18:13:15.553544 522 net.cpp:122] Setting up relu1 | |
I0511 18:13:15.553557 522 net.cpp:129] Top shape: 100 500 (50000) | |
I0511 18:13:15.553561 522 net.cpp:137] Memory required for data: 8074800 | |
I0511 18:13:15.553565 522 layer_factory.hpp:77] Creating layer ip2 | |
I0511 18:13:15.554859 522 net.cpp:84] Creating Layer ip2 | |
I0511 18:13:15.554870 522 net.cpp:406] ip2 <- ip1 | |
I0511 18:13:15.554883 522 net.cpp:380] ip2 -> ip2 | |
I0511 18:13:15.555135 522 net.cpp:122] Setting up ip2 | |
I0511 18:13:15.555141 522 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:13:15.555145 522 net.cpp:137] Memory required for data: 8078800 | |
I0511 18:13:15.555151 522 layer_factory.hpp:77] Creating layer ip2_ip2_0_split | |
I0511 18:13:15.555157 522 net.cpp:84] Creating Layer ip2_ip2_0_split | |
I0511 18:13:15.555160 522 net.cpp:406] ip2_ip2_0_split <- ip2 | |
I0511 18:13:15.555166 522 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_0 | |
I0511 18:13:15.555171 522 net.cpp:380] ip2_ip2_0_split -> ip2_ip2_0_split_1 | |
I0511 18:13:15.555202 522 net.cpp:122] Setting up ip2_ip2_0_split | |
I0511 18:13:15.555207 522 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:13:15.555210 522 net.cpp:129] Top shape: 100 10 (1000) | |
I0511 18:13:15.555213 522 net.cpp:137] Memory required for data: 8086800 | |
I0511 18:13:15.555217 522 layer_factory.hpp:77] Creating layer accuracy | |
I0511 18:13:15.555222 522 net.cpp:84] Creating Layer accuracy | |
I0511 18:13:15.555227 522 net.cpp:406] accuracy <- ip2_ip2_0_split_0 | |
I0511 18:13:15.555230 522 net.cpp:406] accuracy <- label_mnist_1_split_0 | |
I0511 18:13:15.555235 522 net.cpp:380] accuracy -> accuracy | |
I0511 18:13:15.555243 522 net.cpp:122] Setting up accuracy | |
I0511 18:13:15.555248 522 net.cpp:129] Top shape: (1) | |
I0511 18:13:15.555250 522 net.cpp:137] Memory required for data: 8086804 | |
I0511 18:13:15.555253 522 layer_factory.hpp:77] Creating layer loss | |
I0511 18:13:15.555258 522 net.cpp:84] Creating Layer loss | |
I0511 18:13:15.555260 522 net.cpp:406] loss <- ip2_ip2_0_split_1 | |
I0511 18:13:15.555263 522 net.cpp:406] loss <- label_mnist_1_split_1 | |
I0511 18:13:15.555294 522 net.cpp:380] loss -> loss | |
I0511 18:13:15.555302 522 layer_factory.hpp:77] Creating layer loss | |
I0511 18:13:15.556092 522 net.cpp:122] Setting up loss | |
I0511 18:13:15.556102 522 net.cpp:129] Top shape: (1) | |
I0511 18:13:15.556105 522 net.cpp:132] with loss weight 1 | |
I0511 18:13:15.556116 522 net.cpp:137] Memory required for data: 8086808 | |
I0511 18:13:15.556120 522 net.cpp:198] loss needs backward computation. | |
I0511 18:13:15.556125 522 net.cpp:200] accuracy does not need backward computation. | |
I0511 18:13:15.556128 522 net.cpp:198] ip2_ip2_0_split needs backward computation. | |
I0511 18:13:15.556133 522 net.cpp:198] ip2 needs backward computation. | |
I0511 18:13:15.556138 522 net.cpp:198] relu1 needs backward computation. | |
I0511 18:13:15.556140 522 net.cpp:198] ip1 needs backward computation. | |
I0511 18:13:15.556144 522 net.cpp:198] pool2 needs backward computation. | |
I0511 18:13:15.556149 522 net.cpp:198] conv2 needs backward computation. | |
I0511 18:13:15.556152 522 net.cpp:198] pool1 needs backward computation. | |
I0511 18:13:15.556155 522 net.cpp:198] conv1 needs backward computation. | |
I0511 18:13:15.556159 522 net.cpp:200] label_mnist_1_split does not need backward computation. | |
I0511 18:13:15.556164 522 net.cpp:200] mnist does not need backward computation. | |
I0511 18:13:15.556166 522 net.cpp:242] This network produces output accuracy | |
I0511 18:13:15.556170 522 net.cpp:242] This network produces output loss | |
I0511 18:13:15.556180 522 net.cpp:255] Network initialization done. | |
I0511 18:13:15.556228 522 solver.cpp:56] Solver scaffolding done. | |
I0511 18:13:15.556490 522 caffe.cpp:248] Starting Optimization | |
I0511 18:13:15.556499 522 solver.cpp:272] Solving LeNet | |
I0511 18:13:15.556504 522 solver.cpp:273] Learning Rate Policy: inv | |
I0511 18:13:15.557133 522 solver.cpp:330] Iteration 0, Testing net (#0) | |
I0511 18:13:15.564887 522 blocking_queue.cpp:49] Waiting for data | |
I0511 18:13:15.632856 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:15.633618 522 solver.cpp:397] Test net output #0: accuracy = 0.1305 | |
I0511 18:13:15.633646 522 solver.cpp:397] Test net output #1: loss = 2.33209 (* 1 = 2.33209 loss) | |
I0511 18:13:15.636749 522 solver.cpp:218] Iteration 0 (0 iter/s, 0.0802253s/100 iters), loss = 2.31964 | |
I0511 18:13:15.636775 522 solver.cpp:237] Train net output #0: loss = 2.31964 (* 1 = 2.31964 loss) | |
I0511 18:13:15.636787 522 sgd_solver.cpp:105] Iteration 0, lr = 0.01 | |
I0511 18:13:15.797138 522 solver.cpp:218] Iteration 100 (623.635 iter/s, 0.16035s/100 iters), loss = 0.23069 | |
I0511 18:13:15.797174 522 solver.cpp:237] Train net output #0: loss = 0.23069 (* 1 = 0.23069 loss) | |
I0511 18:13:15.797183 522 sgd_solver.cpp:105] Iteration 100, lr = 0.00992565 | |
I0511 18:13:15.946821 522 solver.cpp:218] Iteration 200 (668.805 iter/s, 0.14952s/100 iters), loss = 0.136102 | |
I0511 18:13:15.946861 522 solver.cpp:237] Train net output #0: loss = 0.136102 (* 1 = 0.136102 loss) | |
I0511 18:13:15.946868 522 sgd_solver.cpp:105] Iteration 200, lr = 0.00985258 | |
I0511 18:13:16.091234 522 solver.cpp:218] Iteration 300 (692.686 iter/s, 0.144366s/100 iters), loss = 0.164669 | |
I0511 18:13:16.091267 522 solver.cpp:237] Train net output #0: loss = 0.164668 (* 1 = 0.164668 loss) | |
I0511 18:13:16.091272 522 sgd_solver.cpp:105] Iteration 300, lr = 0.00978075 | |
I0511 18:13:16.236135 522 solver.cpp:218] Iteration 400 (690.323 iter/s, 0.14486s/100 iters), loss = 0.0880876 | |
I0511 18:13:16.236169 522 solver.cpp:237] Train net output #0: loss = 0.0880875 (* 1 = 0.0880875 loss) | |
I0511 18:13:16.236176 522 sgd_solver.cpp:105] Iteration 400, lr = 0.00971013 | |
I0511 18:13:16.378613 522 solver.cpp:330] Iteration 500, Testing net (#0) | |
I0511 18:13:16.437780 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:16.440119 522 solver.cpp:397] Test net output #0: accuracy = 0.9742 | |
I0511 18:13:16.440145 522 solver.cpp:397] Test net output #1: loss = 0.0827839 (* 1 = 0.0827839 loss) | |
I0511 18:13:16.441550 522 solver.cpp:218] Iteration 500 (486.907 iter/s, 0.205378s/100 iters), loss = 0.10879 | |
I0511 18:13:16.441591 522 solver.cpp:237] Train net output #0: loss = 0.10879 (* 1 = 0.10879 loss) | |
I0511 18:13:16.441601 522 sgd_solver.cpp:105] Iteration 500, lr = 0.00964069 | |
I0511 18:13:16.591470 522 solver.cpp:218] Iteration 600 (667.249 iter/s, 0.149869s/100 iters), loss = 0.081639 | |
I0511 18:13:16.591503 522 solver.cpp:237] Train net output #0: loss = 0.0816389 (* 1 = 0.0816389 loss) | |
I0511 18:13:16.591509 522 sgd_solver.cpp:105] Iteration 600, lr = 0.0095724 | |
I0511 18:13:16.737135 522 solver.cpp:218] Iteration 700 (686.705 iter/s, 0.145623s/100 iters), loss = 0.127346 | |
I0511 18:13:16.737169 522 solver.cpp:237] Train net output #0: loss = 0.127346 (* 1 = 0.127346 loss) | |
I0511 18:13:16.737174 522 sgd_solver.cpp:105] Iteration 700, lr = 0.00950522 | |
I0511 18:13:16.883033 522 solver.cpp:218] Iteration 800 (685.598 iter/s, 0.145858s/100 iters), loss = 0.226645 | |
I0511 18:13:16.883067 522 solver.cpp:237] Train net output #0: loss = 0.226645 (* 1 = 0.226645 loss) | |
I0511 18:13:16.883074 522 sgd_solver.cpp:105] Iteration 800, lr = 0.00943913 | |
I0511 18:13:17.028226 522 solver.cpp:218] Iteration 900 (688.948 iter/s, 0.145149s/100 iters), loss = 0.215599 | |
I0511 18:13:17.028270 522 solver.cpp:237] Train net output #0: loss = 0.215599 (* 1 = 0.215599 loss) | |
I0511 18:13:17.028276 522 sgd_solver.cpp:105] Iteration 900, lr = 0.00937411 | |
I0511 18:13:17.078196 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:17.174376 522 solver.cpp:330] Iteration 1000, Testing net (#0) | |
I0511 18:13:17.233332 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:17.235406 522 solver.cpp:397] Test net output #0: accuracy = 0.9819 | |
I0511 18:13:17.235430 522 solver.cpp:397] Test net output #1: loss = 0.0564125 (* 1 = 0.0564125 loss) | |
I0511 18:13:17.236907 522 solver.cpp:218] Iteration 1000 (479.575 iter/s, 0.208518s/100 iters), loss = 0.083173 | |
I0511 18:13:17.236925 522 solver.cpp:237] Train net output #0: loss = 0.0831728 (* 1 = 0.0831728 loss) | |
I0511 18:13:17.236934 522 sgd_solver.cpp:105] Iteration 1000, lr = 0.00931012 | |
I0511 18:13:17.383625 522 solver.cpp:218] Iteration 1100 (681.702 iter/s, 0.146692s/100 iters), loss = 0.00793706 | |
I0511 18:13:17.383659 522 solver.cpp:237] Train net output #0: loss = 0.00793688 (* 1 = 0.00793688 loss) | |
I0511 18:13:17.383666 522 sgd_solver.cpp:105] Iteration 1100, lr = 0.00924715 | |
I0511 18:13:17.529206 522 solver.cpp:218] Iteration 1200 (687.11 iter/s, 0.145537s/100 iters), loss = 0.0231605 | |
I0511 18:13:17.529242 522 solver.cpp:237] Train net output #0: loss = 0.0231603 (* 1 = 0.0231603 loss) | |
I0511 18:13:17.529249 522 sgd_solver.cpp:105] Iteration 1200, lr = 0.00918515 | |
I0511 18:13:17.674496 522 solver.cpp:218] Iteration 1300 (688.637 iter/s, 0.145214s/100 iters), loss = 0.0206333 | |
I0511 18:13:17.674530 522 solver.cpp:237] Train net output #0: loss = 0.0206331 (* 1 = 0.0206331 loss) | |
I0511 18:13:17.674535 522 sgd_solver.cpp:105] Iteration 1300, lr = 0.00912412 | |
I0511 18:13:17.820415 522 solver.cpp:218] Iteration 1400 (685.509 iter/s, 0.145877s/100 iters), loss = 0.00657667 | |
I0511 18:13:17.820447 522 solver.cpp:237] Train net output #0: loss = 0.00657648 (* 1 = 0.00657648 loss) | |
I0511 18:13:17.820453 522 sgd_solver.cpp:105] Iteration 1400, lr = 0.00906403 | |
I0511 18:13:17.964541 522 solver.cpp:330] Iteration 1500, Testing net (#0) | |
I0511 18:13:18.023663 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:18.025812 522 solver.cpp:397] Test net output #0: accuracy = 0.9861 | |
I0511 18:13:18.025835 522 solver.cpp:397] Test net output #1: loss = 0.046159 (* 1 = 0.046159 loss) | |
I0511 18:13:18.027185 522 solver.cpp:218] Iteration 1500 (483.71 iter/s, 0.206735s/100 iters), loss = 0.0619466 | |
I0511 18:13:18.027220 522 solver.cpp:237] Train net output #0: loss = 0.0619464 (* 1 = 0.0619464 loss) | |
I0511 18:13:18.027228 522 sgd_solver.cpp:105] Iteration 1500, lr = 0.00900485 | |
I0511 18:13:18.176481 522 solver.cpp:218] Iteration 1600 (669.952 iter/s, 0.149264s/100 iters), loss = 0.112313 | |
I0511 18:13:18.176542 522 solver.cpp:237] Train net output #0: loss = 0.112313 (* 1 = 0.112313 loss) | |
I0511 18:13:18.176548 522 sgd_solver.cpp:105] Iteration 1600, lr = 0.00894657 | |
I0511 18:13:18.322377 522 solver.cpp:218] Iteration 1700 (685.731 iter/s, 0.14583s/100 iters), loss = 0.017051 | |
I0511 18:13:18.322412 522 solver.cpp:237] Train net output #0: loss = 0.0170508 (* 1 = 0.0170508 loss) | |
I0511 18:13:18.322417 522 sgd_solver.cpp:105] Iteration 1700, lr = 0.00888916 | |
I0511 18:13:18.467898 522 solver.cpp:218] Iteration 1800 (687.381 iter/s, 0.14548s/100 iters), loss = 0.0144987 | |
I0511 18:13:18.467931 522 solver.cpp:237] Train net output #0: loss = 0.0144985 (* 1 = 0.0144985 loss) | |
I0511 18:13:18.467938 522 sgd_solver.cpp:105] Iteration 1800, lr = 0.0088326 | |
I0511 18:13:18.570827 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:18.616331 522 solver.cpp:218] Iteration 1900 (673.896 iter/s, 0.148391s/100 iters), loss = 0.104943 | |
I0511 18:13:18.616365 522 solver.cpp:237] Train net output #0: loss = 0.104943 (* 1 = 0.104943 loss) | |
I0511 18:13:18.616372 522 sgd_solver.cpp:105] Iteration 1900, lr = 0.00877687 | |
I0511 18:13:18.759907 522 solver.cpp:330] Iteration 2000, Testing net (#0) | |
I0511 18:13:18.820415 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:18.822129 522 solver.cpp:397] Test net output #0: accuracy = 0.9871 | |
I0511 18:13:18.822154 522 solver.cpp:397] Test net output #1: loss = 0.041054 (* 1 = 0.041054 loss) | |
I0511 18:13:18.823673 522 solver.cpp:218] Iteration 2000 (482.386 iter/s, 0.207303s/100 iters), loss = 0.0163311 | |
I0511 18:13:18.823691 522 solver.cpp:237] Train net output #0: loss = 0.0163309 (* 1 = 0.0163309 loss) | |
I0511 18:13:18.823700 522 sgd_solver.cpp:105] Iteration 2000, lr = 0.00872196 | |
I0511 18:13:18.969692 522 solver.cpp:218] Iteration 2100 (684.966 iter/s, 0.145993s/100 iters), loss = 0.0194948 | |
I0511 18:13:18.969727 522 solver.cpp:237] Train net output #0: loss = 0.0194946 (* 1 = 0.0194946 loss) | |
I0511 18:13:18.969732 522 sgd_solver.cpp:105] Iteration 2100, lr = 0.00866784 | |
I0511 18:13:19.115437 522 solver.cpp:218] Iteration 2200 (686.328 iter/s, 0.145703s/100 iters), loss = 0.0156107 | |
I0511 18:13:19.115471 522 solver.cpp:237] Train net output #0: loss = 0.0156106 (* 1 = 0.0156106 loss) | |
I0511 18:13:19.115478 522 sgd_solver.cpp:105] Iteration 2200, lr = 0.0086145 | |
I0511 18:13:19.261158 522 solver.cpp:218] Iteration 2300 (686.437 iter/s, 0.14568s/100 iters), loss = 0.0664188 | |
I0511 18:13:19.261194 522 solver.cpp:237] Train net output #0: loss = 0.0664187 (* 1 = 0.0664187 loss) | |
I0511 18:13:19.261200 522 sgd_solver.cpp:105] Iteration 2300, lr = 0.00856192 | |
I0511 18:13:19.407133 522 solver.cpp:218] Iteration 2400 (685.783 iter/s, 0.145819s/100 iters), loss = 0.00531314 | |
I0511 18:13:19.407166 522 solver.cpp:237] Train net output #0: loss = 0.00531299 (* 1 = 0.00531299 loss) | |
I0511 18:13:19.407172 522 sgd_solver.cpp:105] Iteration 2400, lr = 0.00851008 | |
I0511 18:13:19.551811 522 solver.cpp:330] Iteration 2500, Testing net (#0) | |
I0511 18:13:19.609726 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:19.611837 522 solver.cpp:397] Test net output #0: accuracy = 0.9859 | |
I0511 18:13:19.611865 522 solver.cpp:397] Test net output #1: loss = 0.0455339 (* 1 = 0.0455339 loss) | |
I0511 18:13:19.613373 522 solver.cpp:218] Iteration 2500 (484.971 iter/s, 0.206198s/100 iters), loss = 0.0333312 | |
I0511 18:13:19.613409 522 solver.cpp:237] Train net output #0: loss = 0.0333311 (* 1 = 0.0333311 loss) | |
I0511 18:13:19.613435 522 sgd_solver.cpp:105] Iteration 2500, lr = 0.00845897 | |
I0511 18:13:19.760716 522 solver.cpp:218] Iteration 2600 (679.419 iter/s, 0.147185s/100 iters), loss = 0.0638409 | |
I0511 18:13:19.760751 522 solver.cpp:237] Train net output #0: loss = 0.0638407 (* 1 = 0.0638407 loss) | |
I0511 18:13:19.760756 522 sgd_solver.cpp:105] Iteration 2600, lr = 0.00840857 | |
I0511 18:13:19.907687 522 solver.cpp:218] Iteration 2700 (680.599 iter/s, 0.14693s/100 iters), loss = 0.0564011 | |
I0511 18:13:19.907721 522 solver.cpp:237] Train net output #0: loss = 0.0564009 (* 1 = 0.0564009 loss) | |
I0511 18:13:19.907727 522 sgd_solver.cpp:105] Iteration 2700, lr = 0.00835886 | |
I0511 18:13:20.053236 522 solver.cpp:218] Iteration 2800 (687.257 iter/s, 0.145506s/100 iters), loss = 0.00203006 | |
I0511 18:13:20.053268 522 solver.cpp:237] Train net output #0: loss = 0.00202993 (* 1 = 0.00202993 loss) | |
I0511 18:13:20.053275 522 sgd_solver.cpp:105] Iteration 2800, lr = 0.00830984 | |
I0511 18:13:20.066148 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:20.201350 522 solver.cpp:218] Iteration 2900 (675.345 iter/s, 0.148072s/100 iters), loss = 0.0234769 | |
I0511 18:13:20.201383 522 solver.cpp:237] Train net output #0: loss = 0.0234769 (* 1 = 0.0234769 loss) | |
I0511 18:13:20.201388 522 sgd_solver.cpp:105] Iteration 2900, lr = 0.00826148 | |
I0511 18:13:20.343715 522 solver.cpp:330] Iteration 3000, Testing net (#0) | |
I0511 18:13:20.402021 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:20.404017 522 solver.cpp:397] Test net output #0: accuracy = 0.9873 | |
I0511 18:13:20.404072 522 solver.cpp:397] Test net output #1: loss = 0.0373697 (* 1 = 0.0373697 loss) | |
I0511 18:13:20.405506 522 solver.cpp:218] Iteration 3000 (489.913 iter/s, 0.204118s/100 iters), loss = 0.00960388 | |
I0511 18:13:20.405526 522 solver.cpp:237] Train net output #0: loss = 0.00960378 (* 1 = 0.00960378 loss) | |
I0511 18:13:20.405534 522 sgd_solver.cpp:105] Iteration 3000, lr = 0.00821377 | |
I0511 18:13:20.553649 522 solver.cpp:218] Iteration 3100 (680.166 iter/s, 0.147023s/100 iters), loss = 0.0212637 | |
I0511 18:13:20.553680 522 solver.cpp:237] Train net output #0: loss = 0.0212636 (* 1 = 0.0212636 loss) | |
I0511 18:13:20.553686 522 sgd_solver.cpp:105] Iteration 3100, lr = 0.0081667 | |
I0511 18:13:20.700845 522 solver.cpp:218] Iteration 3200 (679.552 iter/s, 0.147156s/100 iters), loss = 0.00852448 | |
I0511 18:13:20.700877 522 solver.cpp:237] Train net output #0: loss = 0.00852438 (* 1 = 0.00852438 loss) | |
I0511 18:13:20.700883 522 sgd_solver.cpp:105] Iteration 3200, lr = 0.00812025 | |
I0511 18:13:20.847440 522 solver.cpp:218] Iteration 3300 (682.348 iter/s, 0.146553s/100 iters), loss = 0.021878 | |
I0511 18:13:20.847476 522 solver.cpp:237] Train net output #0: loss = 0.0218779 (* 1 = 0.0218779 loss) | |
I0511 18:13:20.847484 522 sgd_solver.cpp:105] Iteration 3300, lr = 0.00807442 | |
I0511 18:13:20.993345 522 solver.cpp:218] Iteration 3400 (686.487 iter/s, 0.145669s/100 iters), loss = 0.00832714 | |
I0511 18:13:20.993378 522 solver.cpp:237] Train net output #0: loss = 0.00832703 (* 1 = 0.00832703 loss) | |
I0511 18:13:20.993384 522 sgd_solver.cpp:105] Iteration 3400, lr = 0.00802918 | |
I0511 18:13:21.136865 522 solver.cpp:330] Iteration 3500, Testing net (#0) | |
I0511 18:13:21.196266 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:21.198382 522 solver.cpp:397] Test net output #0: accuracy = 0.9867 | |
I0511 18:13:21.198410 522 solver.cpp:397] Test net output #1: loss = 0.0415424 (* 1 = 0.0415424 loss) | |
I0511 18:13:21.199802 522 solver.cpp:218] Iteration 3500 (484.448 iter/s, 0.206421s/100 iters), loss = 0.00665751 | |
I0511 18:13:21.199821 522 solver.cpp:237] Train net output #0: loss = 0.00665739 (* 1 = 0.00665739 loss) | |
I0511 18:13:21.199831 522 sgd_solver.cpp:105] Iteration 3500, lr = 0.00798454 | |
I0511 18:13:21.344651 522 solver.cpp:218] Iteration 3600 (690.5 iter/s, 0.144823s/100 iters), loss = 0.0309221 | |
I0511 18:13:21.344687 522 solver.cpp:237] Train net output #0: loss = 0.030922 (* 1 = 0.030922 loss) | |
I0511 18:13:21.344693 522 sgd_solver.cpp:105] Iteration 3600, lr = 0.00794046 | |
I0511 18:13:21.491075 522 solver.cpp:218] Iteration 3700 (683.786 iter/s, 0.146245s/100 iters), loss = 0.0109689 | |
I0511 18:13:21.491107 522 solver.cpp:237] Train net output #0: loss = 0.0109688 (* 1 = 0.0109688 loss) | |
I0511 18:13:21.491142 522 sgd_solver.cpp:105] Iteration 3700, lr = 0.00789695 | |
I0511 18:13:21.558027 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:21.638844 522 solver.cpp:218] Iteration 3800 (676.923 iter/s, 0.147727s/100 iters), loss = 0.00859293 | |
I0511 18:13:21.638876 522 solver.cpp:237] Train net output #0: loss = 0.00859279 (* 1 = 0.00859279 loss) | |
I0511 18:13:21.638881 522 sgd_solver.cpp:105] Iteration 3800, lr = 0.007854 | |
I0511 18:13:21.785058 522 solver.cpp:218] Iteration 3900 (684.119 iter/s, 0.146173s/100 iters), loss = 0.0278121 | |
I0511 18:13:21.785091 522 solver.cpp:237] Train net output #0: loss = 0.0278119 (* 1 = 0.0278119 loss) | |
I0511 18:13:21.785096 522 sgd_solver.cpp:105] Iteration 3900, lr = 0.00781158 | |
I0511 18:13:21.929765 522 solver.cpp:330] Iteration 4000, Testing net (#0) | |
I0511 18:13:21.989279 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:21.990739 522 solver.cpp:397] Test net output #0: accuracy = 0.9889 | |
I0511 18:13:21.990762 522 solver.cpp:397] Test net output #1: loss = 0.0308643 (* 1 = 0.0308643 loss) | |
I0511 18:13:21.992018 522 solver.cpp:218] Iteration 4000 (483.27 iter/s, 0.206924s/100 iters), loss = 0.015232 | |
I0511 18:13:21.992035 522 solver.cpp:237] Train net output #0: loss = 0.0152319 (* 1 = 0.0152319 loss) | |
I0511 18:13:21.992043 522 sgd_solver.cpp:105] Iteration 4000, lr = 0.0077697 | |
I0511 18:13:22.138000 522 solver.cpp:218] Iteration 4100 (685.148 iter/s, 0.145954s/100 iters), loss = 0.0402459 | |
I0511 18:13:22.138032 522 solver.cpp:237] Train net output #0: loss = 0.0402458 (* 1 = 0.0402458 loss) | |
I0511 18:13:22.138038 522 sgd_solver.cpp:105] Iteration 4100, lr = 0.00772833 | |
I0511 18:13:22.283874 522 solver.cpp:218] Iteration 4200 (685.716 iter/s, 0.145833s/100 iters), loss = 0.0181314 | |
I0511 18:13:22.283906 522 solver.cpp:237] Train net output #0: loss = 0.0181313 (* 1 = 0.0181313 loss) | |
I0511 18:13:22.283912 522 sgd_solver.cpp:105] Iteration 4200, lr = 0.00768748 | |
I0511 18:13:22.429419 522 solver.cpp:218] Iteration 4300 (687.28 iter/s, 0.145501s/100 iters), loss = 0.0389803 | |
I0511 18:13:22.429458 522 solver.cpp:237] Train net output #0: loss = 0.0389802 (* 1 = 0.0389802 loss) | |
I0511 18:13:22.429466 522 sgd_solver.cpp:105] Iteration 4300, lr = 0.00764712 | |
I0511 18:13:22.575494 522 solver.cpp:218] Iteration 4400 (685.182 iter/s, 0.145947s/100 iters), loss = 0.0210854 | |
I0511 18:13:22.575526 522 solver.cpp:237] Train net output #0: loss = 0.0210852 (* 1 = 0.0210852 loss) | |
I0511 18:13:22.575531 522 sgd_solver.cpp:105] Iteration 4400, lr = 0.00760726 | |
I0511 18:13:22.720794 522 solver.cpp:330] Iteration 4500, Testing net (#0) | |
I0511 18:13:22.783684 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:22.785125 522 solver.cpp:397] Test net output #0: accuracy = 0.9887 | |
I0511 18:13:22.785150 522 solver.cpp:397] Test net output #1: loss = 0.0357179 (* 1 = 0.0357179 loss) | |
I0511 18:13:22.786545 522 solver.cpp:218] Iteration 4500 (473.898 iter/s, 0.211016s/100 iters), loss = 0.00440531 | |
I0511 18:13:22.786563 522 solver.cpp:237] Train net output #0: loss = 0.00440517 (* 1 = 0.00440517 loss) | |
I0511 18:13:22.786571 522 sgd_solver.cpp:105] Iteration 4500, lr = 0.00756788 | |
I0511 18:13:22.931895 522 solver.cpp:218] Iteration 4600 (688.122 iter/s, 0.145323s/100 iters), loss = 0.0116534 | |
I0511 18:13:22.931931 522 solver.cpp:237] Train net output #0: loss = 0.0116532 (* 1 = 0.0116532 loss) | |
I0511 18:13:22.931938 522 sgd_solver.cpp:105] Iteration 4600, lr = 0.00752897 | |
I0511 18:13:23.055052 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:23.079150 522 solver.cpp:218] Iteration 4700 (679.305 iter/s, 0.147209s/100 iters), loss = 0.00333786 | |
I0511 18:13:23.079187 522 solver.cpp:237] Train net output #0: loss = 0.0033377 (* 1 = 0.0033377 loss) | |
I0511 18:13:23.079195 522 sgd_solver.cpp:105] Iteration 4700, lr = 0.00749052 | |
I0511 18:13:23.225785 522 solver.cpp:218] Iteration 4800 (682.314 iter/s, 0.14656s/100 iters), loss = 0.014052 | |
I0511 18:13:23.225845 522 solver.cpp:237] Train net output #0: loss = 0.0140519 (* 1 = 0.0140519 loss) | |
I0511 18:13:23.225852 522 sgd_solver.cpp:105] Iteration 4800, lr = 0.00745253 | |
I0511 18:13:23.371039 522 solver.cpp:218] Iteration 4900 (688.768 iter/s, 0.145187s/100 iters), loss = 0.00534247 | |
I0511 18:13:23.371071 522 solver.cpp:237] Train net output #0: loss = 0.00534232 (* 1 = 0.00534232 loss) | |
I0511 18:13:23.371078 522 sgd_solver.cpp:105] Iteration 4900, lr = 0.00741498 | |
I0511 18:13:23.516530 522 solver.cpp:447] Snapshotting to binary proto file examples/mnist/lenet_iter_5000.caffemodel | |
I0511 18:13:23.523075 522 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_5000.solverstate | |
I0511 18:13:23.525369 522 solver.cpp:330] Iteration 5000, Testing net (#0) | |
I0511 18:13:23.583453 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:23.585058 522 solver.cpp:397] Test net output #0: accuracy = 0.9893 | |
I0511 18:13:23.585083 522 solver.cpp:397] Test net output #1: loss = 0.0301518 (* 1 = 0.0301518 loss) | |
I0511 18:13:23.586496 522 solver.cpp:218] Iteration 5000 (464.208 iter/s, 0.215421s/100 iters), loss = 0.0314256 | |
I0511 18:13:23.586529 522 solver.cpp:237] Train net output #0: loss = 0.0314255 (* 1 = 0.0314255 loss) | |
I0511 18:13:23.586537 522 sgd_solver.cpp:105] Iteration 5000, lr = 0.00737788 | |
I0511 18:13:23.733778 522 solver.cpp:218] Iteration 5100 (679.14 iter/s, 0.147245s/100 iters), loss = 0.0165372 | |
I0511 18:13:23.733811 522 solver.cpp:237] Train net output #0: loss = 0.016537 (* 1 = 0.016537 loss) | |
I0511 18:13:23.733819 522 sgd_solver.cpp:105] Iteration 5100, lr = 0.0073412 | |
I0511 18:13:23.881382 522 solver.cpp:218] Iteration 5200 (677.69 iter/s, 0.14756s/100 iters), loss = 0.00644915 | |
I0511 18:13:23.881418 522 solver.cpp:237] Train net output #0: loss = 0.00644896 (* 1 = 0.00644896 loss) | |
I0511 18:13:23.881423 522 sgd_solver.cpp:105] Iteration 5200, lr = 0.00730495 | |
I0511 18:13:24.027318 522 solver.cpp:218] Iteration 5300 (685.966 iter/s, 0.14578s/100 iters), loss = 0.000801772 | |
I0511 18:13:24.027353 522 solver.cpp:237] Train net output #0: loss = 0.000801578 (* 1 = 0.000801578 loss) | |
I0511 18:13:24.027357 522 sgd_solver.cpp:105] Iteration 5300, lr = 0.00726911 | |
I0511 18:13:24.174664 522 solver.cpp:218] Iteration 5400 (678.876 iter/s, 0.147302s/100 iters), loss = 0.00711174 | |
I0511 18:13:24.174695 522 solver.cpp:237] Train net output #0: loss = 0.00711156 (* 1 = 0.00711156 loss) | |
I0511 18:13:24.174701 522 sgd_solver.cpp:105] Iteration 5400, lr = 0.00723368 | |
I0511 18:13:24.319320 522 solver.cpp:330] Iteration 5500, Testing net (#0) | |
I0511 18:13:24.377249 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:24.379405 522 solver.cpp:397] Test net output #0: accuracy = 0.99 | |
I0511 18:13:24.379559 522 solver.cpp:397] Test net output #1: loss = 0.0318279 (* 1 = 0.0318279 loss) | |
I0511 18:13:24.381283 522 solver.cpp:218] Iteration 5500 (484.077 iter/s, 0.206579s/100 iters), loss = 0.0179214 | |
I0511 18:13:24.381309 522 solver.cpp:237] Train net output #0: loss = 0.0179212 (* 1 = 0.0179212 loss) | |
I0511 18:13:24.381319 522 sgd_solver.cpp:105] Iteration 5500, lr = 0.00719865 | |
I0511 18:13:24.531106 522 solver.cpp:218] Iteration 5600 (672.471 iter/s, 0.148705s/100 iters), loss = 0.00110432 | |
I0511 18:13:24.531141 522 solver.cpp:237] Train net output #0: loss = 0.00110413 (* 1 = 0.00110413 loss) | |
I0511 18:13:24.531147 522 sgd_solver.cpp:105] Iteration 5600, lr = 0.00716402 | |
I0511 18:13:24.562065 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:24.677906 522 solver.cpp:218] Iteration 5700 (681.929 iter/s, 0.146643s/100 iters), loss = 0.00285689 | |
I0511 18:13:24.677940 522 solver.cpp:237] Train net output #0: loss = 0.00285671 (* 1 = 0.00285671 loss) | |
I0511 18:13:24.677945 522 sgd_solver.cpp:105] Iteration 5700, lr = 0.00712977 | |
I0511 18:13:24.826856 522 solver.cpp:218] Iteration 5800 (672.027 iter/s, 0.148804s/100 iters), loss = 0.0375963 | |
I0511 18:13:24.826916 522 solver.cpp:237] Train net output #0: loss = 0.0375961 (* 1 = 0.0375961 loss) | |
I0511 18:13:24.826922 522 sgd_solver.cpp:105] Iteration 5800, lr = 0.0070959 | |
I0511 18:13:24.972806 522 solver.cpp:218] Iteration 5900 (685.476 iter/s, 0.145884s/100 iters), loss = 0.00753866 | |
I0511 18:13:24.972841 522 solver.cpp:237] Train net output #0: loss = 0.00753848 (* 1 = 0.00753848 loss) | |
I0511 18:13:24.972847 522 sgd_solver.cpp:105] Iteration 5900, lr = 0.0070624 | |
I0511 18:13:25.117579 522 solver.cpp:330] Iteration 6000, Testing net (#0) | |
I0511 18:13:25.177829 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:25.180385 522 solver.cpp:397] Test net output #0: accuracy = 0.9907 | |
I0511 18:13:25.180413 522 solver.cpp:397] Test net output #1: loss = 0.0283849 (* 1 = 0.0283849 loss) | |
I0511 18:13:25.182206 522 solver.cpp:218] Iteration 6000 (477.677 iter/s, 0.209346s/100 iters), loss = 0.00364922 | |
I0511 18:13:25.182230 522 solver.cpp:237] Train net output #0: loss = 0.00364905 (* 1 = 0.00364905 loss) | |
I0511 18:13:25.182241 522 sgd_solver.cpp:105] Iteration 6000, lr = 0.00702927 | |
I0511 18:13:25.329059 522 solver.cpp:218] Iteration 6100 (681.11 iter/s, 0.146819s/100 iters), loss = 0.00189618 | |
I0511 18:13:25.329097 522 solver.cpp:237] Train net output #0: loss = 0.001896 (* 1 = 0.001896 loss) | |
I0511 18:13:25.329102 522 sgd_solver.cpp:105] Iteration 6100, lr = 0.0069965 | |
I0511 18:13:25.475404 522 solver.cpp:218] Iteration 6200 (683.676 iter/s, 0.146268s/100 iters), loss = 0.0114807 | |
I0511 18:13:25.475436 522 solver.cpp:237] Train net output #0: loss = 0.0114806 (* 1 = 0.0114806 loss) | |
I0511 18:13:25.475442 522 sgd_solver.cpp:105] Iteration 6200, lr = 0.00696408 | |
I0511 18:13:25.621726 522 solver.cpp:218] Iteration 6300 (683.611 iter/s, 0.146282s/100 iters), loss = 0.00935609 | |
I0511 18:13:25.621759 522 solver.cpp:237] Train net output #0: loss = 0.00935592 (* 1 = 0.00935592 loss) | |
I0511 18:13:25.621765 522 sgd_solver.cpp:105] Iteration 6300, lr = 0.00693201 | |
I0511 18:13:25.769366 522 solver.cpp:218] Iteration 6400 (677.529 iter/s, 0.147595s/100 iters), loss = 0.0066119 | |
I0511 18:13:25.769410 522 solver.cpp:237] Train net output #0: loss = 0.00661173 (* 1 = 0.00661173 loss) | |
I0511 18:13:25.769418 522 sgd_solver.cpp:105] Iteration 6400, lr = 0.00690029 | |
I0511 18:13:25.916312 522 solver.cpp:330] Iteration 6500, Testing net (#0) | |
I0511 18:13:25.975299 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:25.976896 522 solver.cpp:397] Test net output #0: accuracy = 0.9905 | |
I0511 18:13:25.976920 522 solver.cpp:397] Test net output #1: loss = 0.0312985 (* 1 = 0.0312985 loss) | |
I0511 18:13:25.978351 522 solver.cpp:218] Iteration 6500 (478.611 iter/s, 0.208938s/100 iters), loss = 0.0117257 | |
I0511 18:13:25.978369 522 solver.cpp:237] Train net output #0: loss = 0.0117255 (* 1 = 0.0117255 loss) | |
I0511 18:13:25.978377 522 sgd_solver.cpp:105] Iteration 6500, lr = 0.0068689 | |
I0511 18:13:26.064824 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:26.127346 522 solver.cpp:218] Iteration 6600 (672.244 iter/s, 0.148755s/100 iters), loss = 0.0246324 | |
I0511 18:13:26.127377 522 solver.cpp:237] Train net output #0: loss = 0.0246322 (* 1 = 0.0246322 loss) | |
I0511 18:13:26.127383 522 sgd_solver.cpp:105] Iteration 6600, lr = 0.00683784 | |
I0511 18:13:26.273828 522 solver.cpp:218] Iteration 6700 (682.855 iter/s, 0.146444s/100 iters), loss = 0.0110486 | |
I0511 18:13:26.273862 522 solver.cpp:237] Train net output #0: loss = 0.0110484 (* 1 = 0.0110484 loss) | |
I0511 18:13:26.273867 522 sgd_solver.cpp:105] Iteration 6700, lr = 0.00680711 | |
I0511 18:13:26.422065 522 solver.cpp:218] Iteration 6800 (674.787 iter/s, 0.148195s/100 iters), loss = 0.0046916 | |
I0511 18:13:26.422096 522 solver.cpp:237] Train net output #0: loss = 0.00469141 (* 1 = 0.00469141 loss) | |
I0511 18:13:26.422116 522 sgd_solver.cpp:105] Iteration 6800, lr = 0.0067767 | |
I0511 18:13:26.569128 522 solver.cpp:218] Iteration 6900 (680.168 iter/s, 0.147022s/100 iters), loss = 0.00297426 | |
I0511 18:13:26.569187 522 solver.cpp:237] Train net output #0: loss = 0.00297408 (* 1 = 0.00297408 loss) | |
I0511 18:13:26.569195 522 sgd_solver.cpp:105] Iteration 6900, lr = 0.0067466 | |
I0511 18:13:26.712939 522 solver.cpp:330] Iteration 7000, Testing net (#0) | |
I0511 18:13:26.771999 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:26.774022 522 solver.cpp:397] Test net output #0: accuracy = 0.9896 | |
I0511 18:13:26.774050 522 solver.cpp:397] Test net output #1: loss = 0.0316401 (* 1 = 0.0316401 loss) | |
I0511 18:13:26.776863 522 solver.cpp:218] Iteration 7000 (481.526 iter/s, 0.207673s/100 iters), loss = 0.00752981 | |
I0511 18:13:26.776882 522 solver.cpp:237] Train net output #0: loss = 0.00752964 (* 1 = 0.00752964 loss) | |
I0511 18:13:26.776890 522 sgd_solver.cpp:105] Iteration 7000, lr = 0.00671681 | |
I0511 18:13:26.923746 522 solver.cpp:218] Iteration 7100 (680.944 iter/s, 0.146855s/100 iters), loss = 0.0114956 | |
I0511 18:13:26.923779 522 solver.cpp:237] Train net output #0: loss = 0.0114955 (* 1 = 0.0114955 loss) | |
I0511 18:13:26.923784 522 sgd_solver.cpp:105] Iteration 7100, lr = 0.00668733 | |
I0511 18:13:27.071755 522 solver.cpp:218] Iteration 7200 (675.826 iter/s, 0.147967s/100 iters), loss = 0.00500658 | |
I0511 18:13:27.071789 522 solver.cpp:237] Train net output #0: loss = 0.00500643 (* 1 = 0.00500643 loss) | |
I0511 18:13:27.071794 522 sgd_solver.cpp:105] Iteration 7200, lr = 0.00665815 | |
I0511 18:13:27.219657 522 solver.cpp:218] Iteration 7300 (676.313 iter/s, 0.14786s/100 iters), loss = 0.0206898 | |
I0511 18:13:27.219691 522 solver.cpp:237] Train net output #0: loss = 0.0206897 (* 1 = 0.0206897 loss) | |
I0511 18:13:27.219696 522 sgd_solver.cpp:105] Iteration 7300, lr = 0.00662927 | |
I0511 18:13:27.365396 522 solver.cpp:218] Iteration 7400 (686.352 iter/s, 0.145698s/100 iters), loss = 0.00565326 | |
I0511 18:13:27.365506 522 solver.cpp:237] Train net output #0: loss = 0.0056531 (* 1 = 0.0056531 loss) | |
I0511 18:13:27.365514 522 sgd_solver.cpp:105] Iteration 7400, lr = 0.00660067 | |
I0511 18:13:27.505698 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:27.510330 522 solver.cpp:330] Iteration 7500, Testing net (#0) | |
I0511 18:13:27.570782 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:27.572875 522 solver.cpp:397] Test net output #0: accuracy = 0.9899 | |
I0511 18:13:27.572896 522 solver.cpp:397] Test net output #1: loss = 0.0319173 (* 1 = 0.0319173 loss) | |
I0511 18:13:27.574172 522 solver.cpp:218] Iteration 7500 (479.24 iter/s, 0.208664s/100 iters), loss = 0.00329135 | |
I0511 18:13:27.574193 522 solver.cpp:237] Train net output #0: loss = 0.0032912 (* 1 = 0.0032912 loss) | |
I0511 18:13:27.574201 522 sgd_solver.cpp:105] Iteration 7500, lr = 0.00657236 | |
I0511 18:13:27.721329 522 solver.cpp:218] Iteration 7600 (679.684 iter/s, 0.147127s/100 iters), loss = 0.00848904 | |
I0511 18:13:27.721362 522 solver.cpp:237] Train net output #0: loss = 0.00848889 (* 1 = 0.00848889 loss) | |
I0511 18:13:27.721369 522 sgd_solver.cpp:105] Iteration 7600, lr = 0.00654433 | |
I0511 18:13:27.867367 522 solver.cpp:218] Iteration 7700 (684.951 iter/s, 0.145996s/100 iters), loss = 0.0191951 | |
I0511 18:13:27.867403 522 solver.cpp:237] Train net output #0: loss = 0.0191949 (* 1 = 0.0191949 loss) | |
I0511 18:13:27.867408 522 sgd_solver.cpp:105] Iteration 7700, lr = 0.00651658 | |
I0511 18:13:28.014005 522 solver.cpp:218] Iteration 7800 (682.167 iter/s, 0.146592s/100 iters), loss = 0.00262706 | |
I0511 18:13:28.014055 522 solver.cpp:237] Train net output #0: loss = 0.00262691 (* 1 = 0.00262691 loss) | |
I0511 18:13:28.014065 522 sgd_solver.cpp:105] Iteration 7800, lr = 0.00648911 | |
I0511 18:13:28.159793 522 solver.cpp:218] Iteration 7900 (686.352 iter/s, 0.145698s/100 iters), loss = 0.00477146 | |
I0511 18:13:28.159826 522 solver.cpp:237] Train net output #0: loss = 0.00477131 (* 1 = 0.00477131 loss) | |
I0511 18:13:28.159832 522 sgd_solver.cpp:105] Iteration 7900, lr = 0.0064619 | |
I0511 18:13:28.304602 522 solver.cpp:330] Iteration 8000, Testing net (#0) | |
I0511 18:13:28.363370 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:28.366027 522 solver.cpp:397] Test net output #0: accuracy = 0.9901 | |
I0511 18:13:28.366067 522 solver.cpp:397] Test net output #1: loss = 0.0294478 (* 1 = 0.0294478 loss) | |
I0511 18:13:28.367393 522 solver.cpp:218] Iteration 8000 (481.78 iter/s, 0.207564s/100 iters), loss = 0.00637796 | |
I0511 18:13:28.367413 522 solver.cpp:237] Train net output #0: loss = 0.00637782 (* 1 = 0.00637782 loss) | |
I0511 18:13:28.367420 522 sgd_solver.cpp:105] Iteration 8000, lr = 0.00643496 | |
I0511 18:13:28.515964 522 solver.cpp:218] Iteration 8100 (673.203 iter/s, 0.148543s/100 iters), loss = 0.0107222 | |
I0511 18:13:28.515998 522 solver.cpp:237] Train net output #0: loss = 0.010722 (* 1 = 0.010722 loss) | |
I0511 18:13:28.516005 522 sgd_solver.cpp:105] Iteration 8100, lr = 0.00640827 | |
I0511 18:13:28.661173 522 solver.cpp:218] Iteration 8200 (688.875 iter/s, 0.145164s/100 iters), loss = 0.00806826 | |
I0511 18:13:28.661219 522 solver.cpp:237] Train net output #0: loss = 0.00806812 (* 1 = 0.00806812 loss) | |
I0511 18:13:28.661226 522 sgd_solver.cpp:105] Iteration 8200, lr = 0.00638185 | |
I0511 18:13:28.808029 522 solver.cpp:218] Iteration 8300 (681.681 iter/s, 0.146696s/100 iters), loss = 0.0342842 | |
I0511 18:13:28.808063 522 solver.cpp:237] Train net output #0: loss = 0.0342841 (* 1 = 0.0342841 loss) | |
I0511 18:13:28.808068 522 sgd_solver.cpp:105] Iteration 8300, lr = 0.00635567 | |
I0511 18:13:28.954053 522 solver.cpp:218] Iteration 8400 (685.01 iter/s, 0.145983s/100 iters), loss = 0.00681243 | |
I0511 18:13:28.954087 522 solver.cpp:237] Train net output #0: loss = 0.00681229 (* 1 = 0.00681229 loss) | |
I0511 18:13:28.954094 522 sgd_solver.cpp:105] Iteration 8400, lr = 0.00632975 | |
I0511 18:13:29.004021 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:29.100819 522 solver.cpp:330] Iteration 8500, Testing net (#0) | |
I0511 18:13:29.159569 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:29.162251 522 solver.cpp:397] Test net output #0: accuracy = 0.9909 | |
I0511 18:13:29.162277 522 solver.cpp:397] Test net output #1: loss = 0.029258 (* 1 = 0.029258 loss) | |
I0511 18:13:29.163722 522 solver.cpp:218] Iteration 8500 (477.04 iter/s, 0.209626s/100 iters), loss = 0.00784863 | |
I0511 18:13:29.163755 522 solver.cpp:237] Train net output #0: loss = 0.00784848 (* 1 = 0.00784848 loss) | |
I0511 18:13:29.163779 522 sgd_solver.cpp:105] Iteration 8500, lr = 0.00630407 | |
I0511 18:13:29.312664 522 solver.cpp:218] Iteration 8600 (671.585 iter/s, 0.148902s/100 iters), loss = 0.00079122 | |
I0511 18:13:29.312713 522 solver.cpp:237] Train net output #0: loss = 0.00079108 (* 1 = 0.00079108 loss) | |
I0511 18:13:29.312736 522 sgd_solver.cpp:105] Iteration 8600, lr = 0.00627864 | |
I0511 18:13:29.458751 522 solver.cpp:218] Iteration 8700 (684.788 iter/s, 0.146031s/100 iters), loss = 0.00329326 | |
I0511 18:13:29.458784 522 solver.cpp:237] Train net output #0: loss = 0.00329312 (* 1 = 0.00329312 loss) | |
I0511 18:13:29.458789 522 sgd_solver.cpp:105] Iteration 8700, lr = 0.00625344 | |
I0511 18:13:29.606690 522 solver.cpp:218] Iteration 8800 (676.14 iter/s, 0.147898s/100 iters), loss = 0.00163464 | |
I0511 18:13:29.606722 522 solver.cpp:237] Train net output #0: loss = 0.0016345 (* 1 = 0.0016345 loss) | |
I0511 18:13:29.606729 522 sgd_solver.cpp:105] Iteration 8800, lr = 0.00622847 | |
I0511 18:13:29.753203 522 solver.cpp:218] Iteration 8900 (682.725 iter/s, 0.146472s/100 iters), loss = 0.000360605 | |
I0511 18:13:29.753237 522 solver.cpp:237] Train net output #0: loss = 0.000360461 (* 1 = 0.000360461 loss) | |
I0511 18:13:29.753242 522 sgd_solver.cpp:105] Iteration 8900, lr = 0.00620374 | |
I0511 18:13:29.897269 522 solver.cpp:330] Iteration 9000, Testing net (#0) | |
I0511 18:13:29.956251 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:29.958307 522 solver.cpp:397] Test net output #0: accuracy = 0.99 | |
I0511 18:13:29.958350 522 solver.cpp:397] Test net output #1: loss = 0.0294408 (* 1 = 0.0294408 loss) | |
I0511 18:13:29.959731 522 solver.cpp:218] Iteration 9000 (484.282 iter/s, 0.206491s/100 iters), loss = 0.0125023 | |
I0511 18:13:29.959749 522 solver.cpp:237] Train net output #0: loss = 0.0125022 (* 1 = 0.0125022 loss) | |
I0511 18:13:29.959758 522 sgd_solver.cpp:105] Iteration 9000, lr = 0.00617924 | |
I0511 18:13:30.106549 522 solver.cpp:218] Iteration 9100 (681.243 iter/s, 0.14679s/100 iters), loss = 0.00944676 | |
I0511 18:13:30.106582 522 solver.cpp:237] Train net output #0: loss = 0.00944661 (* 1 = 0.00944661 loss) | |
I0511 18:13:30.106588 522 sgd_solver.cpp:105] Iteration 9100, lr = 0.00615496 | |
I0511 18:13:30.252221 522 solver.cpp:218] Iteration 9200 (686.666 iter/s, 0.145631s/100 iters), loss = 0.00169064 | |
I0511 18:13:30.252255 522 solver.cpp:237] Train net output #0: loss = 0.00169048 (* 1 = 0.00169048 loss) | |
I0511 18:13:30.252261 522 sgd_solver.cpp:105] Iteration 9200, lr = 0.0061309 | |
I0511 18:13:30.397877 522 solver.cpp:218] Iteration 9300 (686.744 iter/s, 0.145615s/100 iters), loss = 0.00590399 | |
I0511 18:13:30.397912 522 solver.cpp:237] Train net output #0: loss = 0.00590384 (* 1 = 0.00590384 loss) | |
I0511 18:13:30.397918 522 sgd_solver.cpp:105] Iteration 9300, lr = 0.00610706 | |
I0511 18:13:30.501073 527 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:30.546329 522 solver.cpp:218] Iteration 9400 (673.817 iter/s, 0.148408s/100 iters), loss = 0.0222062 | |
I0511 18:13:30.546365 522 solver.cpp:237] Train net output #0: loss = 0.022206 (* 1 = 0.022206 loss) | |
I0511 18:13:30.546372 522 sgd_solver.cpp:105] Iteration 9400, lr = 0.00608343 | |
I0511 18:13:30.689489 522 solver.cpp:330] Iteration 9500, Testing net (#0) | |
I0511 18:13:30.749977 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:30.750983 522 solver.cpp:397] Test net output #0: accuracy = 0.9886 | |
I0511 18:13:30.751006 522 solver.cpp:397] Test net output #1: loss = 0.0348373 (* 1 = 0.0348373 loss) | |
I0511 18:13:30.752398 522 solver.cpp:218] Iteration 9500 (485.365 iter/s, 0.20603s/100 iters), loss = 0.00510515 | |
I0511 18:13:30.752418 522 solver.cpp:237] Train net output #0: loss = 0.00510499 (* 1 = 0.00510499 loss) | |
I0511 18:13:30.752425 522 sgd_solver.cpp:105] Iteration 9500, lr = 0.00606002 | |
I0511 18:13:30.901768 522 solver.cpp:218] Iteration 9600 (669.672 iter/s, 0.149327s/100 iters), loss = 0.00206747 | |
I0511 18:13:30.901801 522 solver.cpp:237] Train net output #0: loss = 0.00206731 (* 1 = 0.00206731 loss) | |
I0511 18:13:30.901808 522 sgd_solver.cpp:105] Iteration 9600, lr = 0.00603682 | |
I0511 18:13:31.048707 522 solver.cpp:218] Iteration 9700 (680.75 iter/s, 0.146897s/100 iters), loss = 0.0022947 | |
I0511 18:13:31.048831 522 solver.cpp:237] Train net output #0: loss = 0.00229454 (* 1 = 0.00229454 loss) | |
I0511 18:13:31.048842 522 sgd_solver.cpp:105] Iteration 9700, lr = 0.00601382 | |
I0511 18:13:31.197224 522 solver.cpp:218] Iteration 9800 (674.045 iter/s, 0.148358s/100 iters), loss = 0.0104893 | |
I0511 18:13:31.197260 522 solver.cpp:237] Train net output #0: loss = 0.0104892 (* 1 = 0.0104892 loss) | |
I0511 18:13:31.197268 522 sgd_solver.cpp:105] Iteration 9800, lr = 0.00599102 | |
I0511 18:13:31.343189 522 solver.cpp:218] Iteration 9900 (685.87 iter/s, 0.1458s/100 iters), loss = 0.00333754 | |
I0511 18:13:31.343222 522 solver.cpp:237] Train net output #0: loss = 0.00333739 (* 1 = 0.00333739 loss) | |
I0511 18:13:31.343227 522 sgd_solver.cpp:105] Iteration 9900, lr = 0.00596843 | |
I0511 18:13:31.488759 522 solver.cpp:447] Snapshotting to binary proto file examples/mnist/lenet_iter_10000.caffemodel | |
I0511 18:13:31.493667 522 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_10000.solverstate | |
I0511 18:13:31.496690 522 solver.cpp:310] Iteration 10000, loss = 0.00347438 | |
I0511 18:13:31.496726 522 solver.cpp:330] Iteration 10000, Testing net (#0) | |
I0511 18:13:31.556267 528 data_layer.cpp:73] Restarting data prefetching from start. | |
I0511 18:13:31.558285 522 solver.cpp:397] Test net output #0: accuracy = 0.9906 | |
I0511 18:13:31.558308 522 solver.cpp:397] Test net output #1: loss = 0.0289746 (* 1 = 0.0289746 loss) | |
I0511 18:13:31.558313 522 solver.cpp:315] Optimization Done. | |
I0511 18:13:31.558317 522 caffe.cpp:259] Optimization Done. | |
real 0m17.463s | |
user 0m13.620s | |
sys 0m6.268s | |
root@0a1c1b0432ff:/opt/caffe# ls | |
CMakeLists.txt INSTALL.md Makefile.config build data examples models src | |
CONTRIBUTING.md LICENSE Makefile.config.example caffe.cloc docker include python tools | |
CONTRIBUTORS.md Makefile README.md cmake docs matlab scripts | |
root@0a1c1b0432ff:/opt/caffe# |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment