From 5b557340fbb8630eefaac45025ef768cccabcb6b Mon Sep 17 00:00:00 2001 From: yihui-he Date: Mon, 16 Jan 2017 10:48:45 +0800 Subject: [PATCH] resnet32 --- .gitignore | 2 +- README.md | 10 + resnet_18/resnet_18.prototxt | 70 +- ...snet_32_resnet_32__2017-01-11_18-29-29.log | 6571 +++++++++++++++++ resnet_32/loss.png | Bin 0 -> 36394 bytes resnet_32/resnet_32.prototxt | 126 +- 6 files changed, 6584 insertions(+), 195 deletions(-) create mode 100644 README.md create mode 100644 resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log create mode 100644 resnet_32/loss.png diff --git a/.gitignore b/.gitignore index b460b20..91c7001 100644 --- a/.gitignore +++ b/.gitignore @@ -59,7 +59,7 @@ coverage.xml *.pot # Django stuff: -*.log +#*.log local_settings.py # Flask stuff: diff --git a/README.md b/README.md new file mode 100644 index 0000000..d8286b4 --- /dev/null +++ b/README.md @@ -0,0 +1,10 @@ +# Train ResNet on ImageNet with Caffe +### resnet-18 +### resnet-32 +This is a bottleneck architecture. +Since there is no strong data augmentation or 10-crop testing in Caffe, the results may be a bit low. +test accuracy: accuracy@1 = 0.67892, accuracy@5 = 0.88164 +The training loss for resnet-32 is shown below: +The pretrained model is provided [here]() +![a](resnet_32/loss.png) + diff --git a/resnet_18/resnet_18.prototxt b/resnet_18/resnet_18.prototxt index 0c63d45..183e403 100644 --- a/resnet_18/resnet_18.prototxt +++ b/resnet_18/resnet_18.prototxt @@ -56,7 +56,7 @@ layer { } bias_filler { type: "constant" - value: 0.2 + value: 0 } } } @@ -108,10 +108,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -145,10 +141,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -188,10 +180,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -231,10 +219,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -281,10 +265,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -324,10 +304,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -367,10 +343,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -417,10 +389,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -460,10 +428,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -503,10 +467,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -553,10 +513,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -590,10 +546,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -633,10 +585,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -676,10 +624,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -726,10 +670,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -769,10 +709,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { @@ -812,10 +748,6 @@ layer
{ weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } layer { diff --git a/resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log b/resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log new file mode 100644 index 0000000..6b79eed --- /dev/null +++ b/resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log @@ -0,0 +1,6571 @@ +++ echo Logging output to resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log +Logging output to resnet_32/logs/resnet_32_resnet_32__2017-01-11_18-29-29.log +++ /home/heyihui/heyihui-local/caffe/build/tools/caffe train -gpu 0,1,2,3 -solver resnet_32/resnet_32_solver.prototxt -sighup_effect stop +I0111 18:29:29.948931 12987 caffe.cpp:217] Using GPUs 0, 1, 2, 3 +I0111 18:29:30.031293 12987 caffe.cpp:222] GPU 0: GeForce GTX TITAN X +I0111 18:29:30.032470 12987 caffe.cpp:222] GPU 1: GeForce GTX TITAN X +I0111 18:29:30.033612 12987 caffe.cpp:222] GPU 2: GeForce GTX TITAN X +I0111 18:29:30.034747 12987 caffe.cpp:222] GPU 3: GeForce GTX TITAN X +I0111 18:29:30.730912 12987 solver.cpp:48] Initializing solver from parameters: +base_lr: 0.1 +display: 1000 +max_iter: 800000 +lr_policy: "step" +gamma: 0.1 +momentum: 0.9 +weight_decay: 0.0001 +stepsize: 200000 +snapshot: 50000 +snapshot_prefix: "/home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32" +solver_mode: GPU +device_id: 0 +net: "resnet_32/resnet_32.prototxt" +train_state { + level: 0 + stage: "" +} +average_loss: 1000 +I0111 18:29:31.147613 12987 solver.cpp:91] Creating training net from net file: resnet_32/resnet_32.prototxt +I0111 18:29:31.176447 12987 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer resnet_32 +I0111 18:29:31.177498 12987 net.cpp:58] Initializing net from parameters: +name: "ResNet-32" +state { + phase: TRAIN + level: 0 + stage: "" +} +layer { + name: "resnet_32" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "/home/heyihui/heyihui-local/caffe/examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 32 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "bn_conv1" + type: "BatchNorm" + bottom: "conv1" + top: "conv1" + batch_norm_param { + } +} +layer { + name: "scale_conv1" + type: "Scale" + bottom: "conv1" + top: "conv1" + scale_param { + bias_term: true + } +} +layer { + name: "conv1_relu" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "res2a_branch1" + type: "Convolution" + bottom: "pool1" + top: "res2a_branch1" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2a_branch1" + type: "BatchNorm" + bottom: "res2a_branch1" + top: "res2a_branch1" + batch_norm_param { + } +} +layer { + name: "scale2a_branch1" + type: "Scale" + bottom: "res2a_branch1" + top: "res2a_branch1" + scale_param { + bias_term: true + } +} +layer { + name: "res2a_branch2a" + type: "Convolution" + bottom: "pool1" + top: "res2a_branch2a" + convolution_param { + 
num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2a_branch2a" + type: "BatchNorm" + bottom: "res2a_branch2a" + top: "res2a_branch2a" + batch_norm_param { + } +} +layer { + name: "scale2a_branch2a" + type: "Scale" + bottom: "res2a_branch2a" + top: "res2a_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res2a_branch2a_relu" + type: "ReLU" + bottom: "res2a_branch2a" + top: "res2a_branch2a" +} +layer { + name: "res2a_branch2b" + type: "Convolution" + bottom: "res2a_branch2a" + top: "res2a_branch2b" + convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2a_branch2b" + type: "BatchNorm" + bottom: "res2a_branch2b" + top: "res2a_branch2b" + batch_norm_param { + } +} +layer { + name: "scale2a_branch2b" + type: "Scale" + bottom: "res2a_branch2b" + top: "res2a_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res2a_branch2b_relu" + type: "ReLU" + bottom: "res2a_branch2b" + top: "res2a_branch2b" +} +layer { + name: "res2a_branch2c" + type: "Convolution" + bottom: "res2a_branch2b" + top: "res2a_branch2c" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2a_branch2c" + type: "BatchNorm" + bottom: "res2a_branch2c" + top: "res2a_branch2c" + batch_norm_param { + } +} +layer { + name: "scale2a_branch2c" + type: "Scale" + bottom: "res2a_branch2c" + top: "res2a_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res2a" + type: "Eltwise" + bottom: "res2a_branch1" + bottom: "res2a_branch2c" + top: "res2a" +} +layer { + name: "res2a_relu" + type: "ReLU" + bottom: "res2a" + top: "res2a" +} +layer { + name: "res2b_branch2a" + type: "Convolution" + bottom: "res2a" + top: "res2b_branch2a" + convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2b_branch2a" + type: "BatchNorm" + bottom: "res2b_branch2a" + top: "res2b_branch2a" + batch_norm_param { + } +} +layer { + name: "scale2b_branch2a" + type: "Scale" + bottom: "res2b_branch2a" + top: "res2b_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res2b_branch2a_relu" + type: "ReLU" + bottom: "res2b_branch2a" + top: "res2b_branch2a" +} +layer { + name: "res2b_branch2b" + type: "Convolution" + bottom: "res2b_branch2a" + top: "res2b_branch2b" + convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2b_branch2b" + type: "BatchNorm" + bottom: "res2b_branch2b" + top: "res2b_branch2b" + batch_norm_param { + } +} +layer { + name: "scale2b_branch2b" + type: "Scale" + bottom: "res2b_branch2b" + top: "res2b_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res2b_branch2b_relu" + type: "ReLU" + bottom: "res2b_branch2b" + top: "res2b_branch2b" +} +layer { + name: "res2b_branch2c" + type: "Convolution" + bottom: "res2b_branch2b" + top: "res2b_branch2c" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2b_branch2c" + type: "BatchNorm" + bottom: "res2b_branch2c" + top: "res2b_branch2c" + batch_norm_param { + } +} +layer { + name: "scale2b_branch2c" + type: "Scale" + bottom: 
"res2b_branch2c" + top: "res2b_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res2b" + type: "Eltwise" + bottom: "res2a" + bottom: "res2b_branch2c" + top: "res2b" +} +layer { + name: "res2b_relu" + type: "ReLU" + bottom: "res2b" + top: "res2b" +} +layer { + name: "res2c_branch2a" + type: "Convolution" + bottom: "res2b" + top: "res2c_branch2a" + convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2c_branch2a" + type: "BatchNorm" + bottom: "res2c_branch2a" + top: "res2c_branch2a" + batch_norm_param { + } +} +layer { + name: "scale2c_branch2a" + type: "Scale" + bottom: "res2c_branch2a" + top: "res2c_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res2c_branch2a_relu" + type: "ReLU" + bottom: "res2c_branch2a" + top: "res2c_branch2a" +} +layer { + name: "res2c_branch2b" + type: "Convolution" + bottom: "res2c_branch2a" + top: "res2c_branch2b" + convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2c_branch2b" + type: "BatchNorm" + bottom: "res2c_branch2b" + top: "res2c_branch2b" + batch_norm_param { + } +} +layer { + name: "scale2c_branch2b" + type: "Scale" + bottom: "res2c_branch2b" + top: "res2c_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res2c_branch2b_relu" + type: "ReLU" + bottom: "res2c_branch2b" + top: "res2c_branch2b" +} +layer { + name: "res2c_branch2c" + type: "Convolution" + bottom: "res2c_branch2b" + top: "res2c_branch2c" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn2c_branch2c" + type: "BatchNorm" + bottom: "res2c_branch2c" + top: "res2c_branch2c" + batch_norm_param { + } +} +layer { + name: "scale2c_branch2c" + type: "Scale" + bottom: "res2c_branch2c" + top: "res2c_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res2c" + type: "Eltwise" + bottom: "res2b" + bottom: "res2c_branch2c" + top: "res2c" +} +layer { + name: "res2c_relu" + type: "ReLU" + bottom: "res2c" + top: "res2c" +} +layer { + name: "res3a_branch1" + type: "Convolution" + bottom: "res2c" + top: "res3a_branch1" + convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3a_branch1" + type: "BatchNorm" + bottom: "res3a_branch1" + top: "res3a_branch1" + batch_norm_param { + } +} +layer { + name: "scale3a_branch1" + type: "Scale" + bottom: "res3a_branch1" + top: "res3a_branch1" + scale_param { + bias_term: true + } +} +layer { + name: "res3a_branch2a" + type: "Convolution" + bottom: "res2c" + top: "res3a_branch2a" + convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3a_branch2a" + type: "BatchNorm" + bottom: "res3a_branch2a" + top: "res3a_branch2a" + batch_norm_param { + } +} +layer { + name: "scale3a_branch2a" + type: "Scale" + bottom: "res3a_branch2a" + top: "res3a_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res3a_branch2a_relu" + type: "ReLU" + bottom: "res3a_branch2a" + top: "res3a_branch2a" +} +layer { + name: "res3a_branch2b" + type: "Convolution" + bottom: "res3a_branch2a" + top: "res3a_branch2b" + convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 
+ stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3a_branch2b" + type: "BatchNorm" + bottom: "res3a_branch2b" + top: "res3a_branch2b" + batch_norm_param { + } +} +layer { + name: "scale3a_branch2b" + type: "Scale" + bottom: "res3a_branch2b" + top: "res3a_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res3a_branch2b_relu" + type: "ReLU" + bottom: "res3a_branch2b" + top: "res3a_branch2b" +} +layer { + name: "res3a_branch2c" + type: "Convolution" + bottom: "res3a_branch2b" + top: "res3a_branch2c" + convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3a_branch2c" + type: "BatchNorm" + bottom: "res3a_branch2c" + top: "res3a_branch2c" + batch_norm_param { + } +} +layer { + name: "scale3a_branch2c" + type: "Scale" + bottom: "res3a_branch2c" + top: "res3a_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res3a" + type: "Eltwise" + bottom: "res3a_branch1" + bottom: "res3a_branch2c" + top: "res3a" +} +layer { + name: "res3a_relu" + type: "ReLU" + bottom: "res3a" + top: "res3a" +} +layer { + name: "res3b_branch2a" + type: "Convolution" + bottom: "res3a" + top: "res3b_branch2a" + convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3b_branch2a" + type: "BatchNorm" + bottom: "res3b_branch2a" + top: "res3b_branch2a" + batch_norm_param { + } +} +layer { + name: "scale3b_branch2a" + type: "Scale" + bottom: "res3b_branch2a" + top: "res3b_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res3b_branch2a_relu" + type: "ReLU" + bottom: "res3b_branch2a" + top: "res3b_branch2a" +} +layer { + name: "res3b_branch2b" + type: "Convolution" + bottom: "res3b_branch2a" + top: "res3b_branch2b" + convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3b_branch2b" + type: "BatchNorm" + bottom: "res3b_branch2b" + top: "res3b_branch2b" + batch_norm_param { + } +} +layer { + name: "scale3b_branch2b" + type: "Scale" + bottom: "res3b_branch2b" + top: "res3b_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res3b_branch2b_relu" + type: "ReLU" + bottom: "res3b_branch2b" + top: "res3b_branch2b" +} +layer { + name: "res3b_branch2c" + type: "Convolution" + bottom: "res3b_branch2b" + top: "res3b_branch2c" + convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3b_branch2c" + type: "BatchNorm" + bottom: "res3b_branch2c" + top: "res3b_branch2c" + batch_norm_param { + } +} +layer { + name: "scale3b_branch2c" + type: "Scale" + bottom: "res3b_branch2c" + top: "res3b_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res3b" + type: "Eltwise" + bottom: "res3a" + bottom: "res3b_branch2c" + top: "res3b" +} +layer { + name: "res3b_relu" + type: "ReLU" + bottom: "res3b" + top: "res3b" +} +layer { + name: "res3c_branch2a" + type: "Convolution" + bottom: "res3b" + top: "res3c_branch2a" + convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3c_branch2a" + type: "BatchNorm" + bottom: "res3c_branch2a" + top: "res3c_branch2a" + batch_norm_param { + } +} +layer { + name: "scale3c_branch2a" + type: "Scale" + 
bottom: "res3c_branch2a" + top: "res3c_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res3c_branch2a_relu" + type: "ReLU" + bottom: "res3c_branch2a" + top: "res3c_branch2a" +} +layer { + name: "res3c_branch2b" + type: "Convolution" + bottom: "res3c_branch2a" + top: "res3c_branch2b" + convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3c_branch2b" + type: "BatchNorm" + bottom: "res3c_branch2b" + top: "res3c_branch2b" + batch_norm_param { + } +} +layer { + name: "scale3c_branch2b" + type: "Scale" + bottom: "res3c_branch2b" + top: "res3c_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res3c_branch2b_relu" + type: "ReLU" + bottom: "res3c_branch2b" + top: "res3c_branch2b" +} +layer { + name: "res3c_branch2c" + type: "Convolution" + bottom: "res3c_branch2b" + top: "res3c_branch2c" + convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3c_branch2c" + type: "BatchNorm" + bottom: "res3c_branch2c" + top: "res3c_branch2c" + batch_norm_param { + } +} +layer { + name: "scale3c_branch2c" + type: "Scale" + bottom: "res3c_branch2c" + top: "res3c_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res3c" + type: "Eltwise" + bottom: "res3b" + bottom: "res3c_branch2c" + top: "res3c" +} +layer { + name: "res3c_relu" + type: "ReLU" + bottom: "res3c" + top: "res3c" +} +layer { + name: "res3d_branch2a" + type: "Convolution" + bottom: "res3c" + top: "res3d_branch2a" + convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3d_branch2a" + type: "BatchNorm" + bottom: "res3d_branch2a" + top: "res3d_branch2a" + batch_norm_param { + } +} +layer { + name: "scale3d_branch2a" + type: "Scale" + bottom: "res3d_branch2a" + top: "res3d_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res3d_branch2a_relu" + type: "ReLU" + bottom: "res3d_branch2a" + top: "res3d_branch2a" +} +layer { + name: "res3d_branch2b" + type: "Convolution" + bottom: "res3d_branch2a" + top: "res3d_branch2b" + convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3d_branch2b" + type: "BatchNorm" + bottom: "res3d_branch2b" + top: "res3d_branch2b" + batch_norm_param { + } +} +layer { + name: "scale3d_branch2b" + type: "Scale" + bottom: "res3d_branch2b" + top: "res3d_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res3d_branch2b_relu" + type: "ReLU" + bottom: "res3d_branch2b" + top: "res3d_branch2b" +} +layer { + name: "res3d_branch2c" + type: "Convolution" + bottom: "res3d_branch2b" + top: "res3d_branch2c" + convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn3d_branch2c" + type: "BatchNorm" + bottom: "res3d_branch2c" + top: "res3d_branch2c" + batch_norm_param { + } +} +layer { + name: "scale3d_branch2c" + type: "Scale" + bottom: "res3d_branch2c" + top: "res3d_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res3d" + type: "Eltwise" + bottom: "res3c" + bottom: "res3d_branch2c" + top: "res3d" +} +layer { + name: "res3d_relu" + type: "ReLU" + bottom: "res3d" + top: "res3d" +} +layer { + name: "res4a_branch1" + type: 
"Convolution" + bottom: "res3d" + top: "res4a_branch1" + convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4a_branch1" + type: "BatchNorm" + bottom: "res4a_branch1" + top: "res4a_branch1" + batch_norm_param { + } +} +layer { + name: "scale4a_branch1" + type: "Scale" + bottom: "res4a_branch1" + top: "res4a_branch1" + scale_param { + bias_term: true + } +} +layer { + name: "res4a_branch2a" + type: "Convolution" + bottom: "res3d" + top: "res4a_branch2a" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4a_branch2a" + type: "BatchNorm" + bottom: "res4a_branch2a" + top: "res4a_branch2a" + batch_norm_param { + } +} +layer { + name: "scale4a_branch2a" + type: "Scale" + bottom: "res4a_branch2a" + top: "res4a_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res4a_branch2a_relu" + type: "ReLU" + bottom: "res4a_branch2a" + top: "res4a_branch2a" +} +layer { + name: "res4a_branch2b" + type: "Convolution" + bottom: "res4a_branch2a" + top: "res4a_branch2b" + convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4a_branch2b" + type: "BatchNorm" + bottom: "res4a_branch2b" + top: "res4a_branch2b" + batch_norm_param { + } +} +layer { + name: "scale4a_branch2b" + type: "Scale" + bottom: "res4a_branch2b" + top: "res4a_branch2b" + scale_param { + bias_term: true + } +} +layer { + name: "res4a_branch2b_relu" + type: "ReLU" + bottom: "res4a_branch2b" + top: "res4a_branch2b" +} +layer { + name: "res4a_branch2c" + type: "Convolution" + bottom: "res4a_branch2b" + top: "res4a_branch2c" + convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4a_branch2c" + type: "BatchNorm" + bottom: "res4a_branch2c" + top: "res4a_branch2c" + batch_norm_param { + } +} +layer { + name: "scale4a_branch2c" + type: "Scale" + bottom: "res4a_branch2c" + top: "res4a_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res4a" + type: "Eltwise" + bottom: "res4a_branch1" + bottom: "res4a_branch2c" + top: "res4a" +} +layer { + name: "res4a_relu" + type: "ReLU" + bottom: "res4a" + top: "res4a" +} +layer { + name: "res4b_branch2a" + type: "Convolution" + bottom: "res4a" + top: "res4b_branch2a" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4b_branch2a" + type: "BatchNorm" + bottom: "res4b_branch2a" + top: "res4b_branch2a" + batch_norm_param { + } +} +layer { + name: "scale4b_branch2a" + type: "Scale" + bottom: "res4b_branch2a" + top: "res4b_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res4b_branch2a_relu" + type: "ReLU" + bottom: "res4b_branch2a" + top: "res4b_branch2a" +} +layer { + name: "res4b_branch2b" + type: "Convolution" + bottom: "res4b_branch2a" + top: "res4b_branch2b" + convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4b_branch2b" + type: "BatchNorm" + bottom: "res4b_branch2b" + top: "res4b_branch2b" + batch_norm_param { + } +} +layer { + name: "scale4b_branch2b" + type: "Scale" + bottom: "res4b_branch2b" + top: "res4b_branch2b" + 
scale_param { + bias_term: true + } +} +layer { + name: "res4b_branch2b_relu" + type: "ReLU" + bottom: "res4b_branch2b" + top: "res4b_branch2b" +} +layer { + name: "res4b_branch2c" + type: "Convolution" + bottom: "res4b_branch2b" + top: "res4b_branch2c" + convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4b_branch2c" + type: "BatchNorm" + bottom: "res4b_branch2c" + top: "res4b_branch2c" + batch_norm_param { + } +} +layer { + name: "scale4b_branch2c" + type: "Scale" + bottom: "res4b_branch2c" + top: "res4b_branch2c" + scale_param { + bias_term: true + } +} +layer { + name: "res4b" + type: "Eltwise" + bottom: "res4a" + bottom: "res4b_branch2c" + top: "res4b" +} +layer { + name: "res4b_relu" + type: "ReLU" + bottom: "res4b" + top: "res4b" +} +layer { + name: "res4c_branch2a" + type: "Convolution" + bottom: "res4b" + top: "res4c_branch2a" + convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + } +} +layer { + name: "bn4c_branch2a" + type: "BatchNorm" + bottom: "res4c_branch2a" + top: "res4c_branch2a" + batch_norm_param { + } +} +layer { + name: "scale4c_branch2a" + type: "Scale" + bottom: "res4c_branch2a" + top: "res4c_branch2a" + scale_param { + bias_term: true + } +} +layer { + name: "res4c_branch2a_relu" + type: "ReLU" + bottom: "res4c_branch2a" + top: "res4c_branch2a" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "res4c_branch2a" + top: "pool5" + pooling_param { + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + name: "fc1000" + type: "InnerProduct" + bottom: "pool5" + top: "fc1000" + inner_product_param { + num_output: 1000 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "prob" + type: "SoftmaxWithLoss" + bottom: "fc1000" + bottom: "label" + top: "prob" + include { + phase: TRAIN + } +} +layer { + name: "accuracy/top1" + type: "Accuracy" + bottom: "fc1000" + bottom: "label" + top: "accuracy@1" + accuracy_param { + top_k: 1 + } +} +layer { + name: "accuracy/top5" + type: "Accuracy" + bottom: "fc1000" + bottom: "label" + top: "accuracy@5" + accuracy_param { + top_k: 5 + } +} +I0111 18:29:31.178401 12987 layer_factory.hpp:77] Creating layer resnet_32 +I0111 18:29:31.179834 12987 net.cpp:100] Creating Layer resnet_32 +I0111 18:29:31.179862 12987 net.cpp:408] resnet_32 -> data +I0111 18:29:31.179939 12987 net.cpp:408] resnet_32 -> label +I0111 18:29:31.218631 12996 db_lmdb.cpp:35] Opened lmdb /home/heyihui/heyihui-local/caffe/examples/imagenet/ilsvrc12_train_lmdb +I0111 18:29:31.980911 12987 data_layer.cpp:41] output data size: 32,3,224,224 +I0111 18:29:32.033973 12987 net.cpp:150] Setting up resnet_32 +I0111 18:29:32.034021 12987 net.cpp:157] Top shape: 32 3 224 224 (4816896) +I0111 18:29:32.034030 12987 net.cpp:157] Top shape: 32 (32) +I0111 18:29:32.034035 12987 net.cpp:165] Memory required for data: 19267712 +I0111 18:29:32.034051 12987 layer_factory.hpp:77] Creating layer label_resnet_32_1_split +I0111 18:29:32.034073 12987 net.cpp:100] Creating Layer label_resnet_32_1_split +I0111 18:29:32.034082 12987 net.cpp:434] label_resnet_32_1_split <- label +I0111 18:29:32.034102 12987 net.cpp:408] label_resnet_32_1_split -> label_resnet_32_1_split_0 +I0111 18:29:32.034121 12987 net.cpp:408] label_resnet_32_1_split -> label_resnet_32_1_split_1 +I0111 18:29:32.034133 12987 net.cpp:408] label_resnet_32_1_split -> 
label_resnet_32_1_split_2 +I0111 18:29:32.034380 12987 net.cpp:150] Setting up label_resnet_32_1_split +I0111 18:29:32.034425 12987 net.cpp:157] Top shape: 32 (32) +I0111 18:29:32.034451 12987 net.cpp:157] Top shape: 32 (32) +I0111 18:29:32.034459 12987 net.cpp:157] Top shape: 32 (32) +I0111 18:29:32.034464 12987 net.cpp:165] Memory required for data: 19268096 +I0111 18:29:32.034476 12987 layer_factory.hpp:77] Creating layer conv1 +I0111 18:29:32.034534 12987 net.cpp:100] Creating Layer conv1 +I0111 18:29:32.034548 12987 net.cpp:434] conv1 <- data +I0111 18:29:32.034567 12987 net.cpp:408] conv1 -> conv1 +I0111 18:29:32.039474 12997 blocking_queue.cpp:50] Waiting for data +I0111 18:29:32.389271 12987 net.cpp:150] Setting up conv1 +I0111 18:29:32.389350 12987 net.cpp:157] Top shape: 32 64 112 112 (25690112) +I0111 18:29:32.389360 12987 net.cpp:165] Memory required for data: 122028544 +I0111 18:29:32.389415 12987 layer_factory.hpp:77] Creating layer bn_conv1 +I0111 18:29:32.389448 12987 net.cpp:100] Creating Layer bn_conv1 +I0111 18:29:32.389458 12987 net.cpp:434] bn_conv1 <- conv1 +I0111 18:29:32.389472 12987 net.cpp:395] bn_conv1 -> conv1 (in-place) +I0111 18:29:32.389852 12987 net.cpp:150] Setting up bn_conv1 +I0111 18:29:32.389868 12987 net.cpp:157] Top shape: 32 64 112 112 (25690112) +I0111 18:29:32.389873 12987 net.cpp:165] Memory required for data: 224788992 +I0111 18:29:32.389894 12987 layer_factory.hpp:77] Creating layer scale_conv1 +I0111 18:29:32.389917 12987 net.cpp:100] Creating Layer scale_conv1 +I0111 18:29:32.389928 12987 net.cpp:434] scale_conv1 <- conv1 +I0111 18:29:32.389937 12987 net.cpp:395] scale_conv1 -> conv1 (in-place) +I0111 18:29:32.390022 12987 layer_factory.hpp:77] Creating layer scale_conv1 +I0111 18:29:32.390269 12987 net.cpp:150] Setting up scale_conv1 +I0111 18:29:32.390285 12987 net.cpp:157] Top shape: 32 64 112 112 (25690112) +I0111 18:29:32.390291 12987 net.cpp:165] Memory required for data: 327549440 +I0111 18:29:32.390301 12987 layer_factory.hpp:77] Creating layer conv1_relu +I0111 18:29:32.390317 12987 net.cpp:100] Creating Layer conv1_relu +I0111 18:29:32.390323 12987 net.cpp:434] conv1_relu <- conv1 +I0111 18:29:32.390333 12987 net.cpp:395] conv1_relu -> conv1 (in-place) +I0111 18:29:32.391904 12987 net.cpp:150] Setting up conv1_relu +I0111 18:29:32.391932 12987 net.cpp:157] Top shape: 32 64 112 112 (25690112) +I0111 18:29:32.391938 12987 net.cpp:165] Memory required for data: 430309888 +I0111 18:29:32.391945 12987 layer_factory.hpp:77] Creating layer pool1 +I0111 18:29:32.391958 12987 net.cpp:100] Creating Layer pool1 +I0111 18:29:32.391965 12987 net.cpp:434] pool1 <- conv1 +I0111 18:29:32.391975 12987 net.cpp:408] pool1 -> pool1 +I0111 18:29:32.392086 12987 net.cpp:150] Setting up pool1 +I0111 18:29:32.392103 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.392114 12987 net.cpp:165] Memory required for data: 456000000 +I0111 18:29:32.392122 12987 layer_factory.hpp:77] Creating layer pool1_pool1_0_split +I0111 18:29:32.392137 12987 net.cpp:100] Creating Layer pool1_pool1_0_split +I0111 18:29:32.392143 12987 net.cpp:434] pool1_pool1_0_split <- pool1 +I0111 18:29:32.392153 12987 net.cpp:408] pool1_pool1_0_split -> pool1_pool1_0_split_0 +I0111 18:29:32.392164 12987 net.cpp:408] pool1_pool1_0_split -> pool1_pool1_0_split_1 +I0111 18:29:32.392232 12987 net.cpp:150] Setting up pool1_pool1_0_split +I0111 18:29:32.392246 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.392254 12987 net.cpp:157] Top shape: 32 64 56 56 
(6422528) +I0111 18:29:32.392262 12987 net.cpp:165] Memory required for data: 507380224 +I0111 18:29:32.392268 12987 layer_factory.hpp:77] Creating layer res2a_branch1 +I0111 18:29:32.392289 12987 net.cpp:100] Creating Layer res2a_branch1 +I0111 18:29:32.392298 12987 net.cpp:434] res2a_branch1 <- pool1_pool1_0_split_0 +I0111 18:29:32.392312 12987 net.cpp:408] res2a_branch1 -> res2a_branch1 +I0111 18:29:32.396167 12987 net.cpp:150] Setting up res2a_branch1 +I0111 18:29:32.396194 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.396203 12987 net.cpp:165] Memory required for data: 610140672 +I0111 18:29:32.396214 12987 layer_factory.hpp:77] Creating layer bn2a_branch1 +I0111 18:29:32.396227 12987 net.cpp:100] Creating Layer bn2a_branch1 +I0111 18:29:32.396250 12987 net.cpp:434] bn2a_branch1 <- res2a_branch1 +I0111 18:29:32.396260 12987 net.cpp:395] bn2a_branch1 -> res2a_branch1 (in-place) +I0111 18:29:32.397647 12987 net.cpp:150] Setting up bn2a_branch1 +I0111 18:29:32.397671 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.397680 12987 net.cpp:165] Memory required for data: 712901120 +I0111 18:29:32.397703 12987 layer_factory.hpp:77] Creating layer scale2a_branch1 +I0111 18:29:32.397718 12987 net.cpp:100] Creating Layer scale2a_branch1 +I0111 18:29:32.397727 12987 net.cpp:434] scale2a_branch1 <- res2a_branch1 +I0111 18:29:32.397735 12987 net.cpp:395] scale2a_branch1 -> res2a_branch1 (in-place) +I0111 18:29:32.397805 12987 layer_factory.hpp:77] Creating layer scale2a_branch1 +I0111 18:29:32.397984 12987 net.cpp:150] Setting up scale2a_branch1 +I0111 18:29:32.397997 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.398005 12987 net.cpp:165] Memory required for data: 815661568 +I0111 18:29:32.398015 12987 layer_factory.hpp:77] Creating layer res2a_branch2a +I0111 18:29:32.398031 12987 net.cpp:100] Creating Layer res2a_branch2a +I0111 18:29:32.398039 12987 net.cpp:434] res2a_branch2a <- pool1_pool1_0_split_1 +I0111 18:29:32.398048 12987 net.cpp:408] res2a_branch2a -> res2a_branch2a +I0111 18:29:32.400405 12987 net.cpp:150] Setting up res2a_branch2a +I0111 18:29:32.400431 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.400440 12987 net.cpp:165] Memory required for data: 841351680 +I0111 18:29:32.400449 12987 layer_factory.hpp:77] Creating layer bn2a_branch2a +I0111 18:29:32.400463 12987 net.cpp:100] Creating Layer bn2a_branch2a +I0111 18:29:32.400472 12987 net.cpp:434] bn2a_branch2a <- res2a_branch2a +I0111 18:29:32.400481 12987 net.cpp:395] bn2a_branch2a -> res2a_branch2a (in-place) +I0111 18:29:32.400795 12987 net.cpp:150] Setting up bn2a_branch2a +I0111 18:29:32.400809 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.400816 12987 net.cpp:165] Memory required for data: 867041792 +I0111 18:29:32.400833 12987 layer_factory.hpp:77] Creating layer scale2a_branch2a +I0111 18:29:32.400847 12987 net.cpp:100] Creating Layer scale2a_branch2a +I0111 18:29:32.400856 12987 net.cpp:434] scale2a_branch2a <- res2a_branch2a +I0111 18:29:32.400862 12987 net.cpp:395] scale2a_branch2a -> res2a_branch2a (in-place) +I0111 18:29:32.400924 12987 layer_factory.hpp:77] Creating layer scale2a_branch2a +I0111 18:29:32.401113 12987 net.cpp:150] Setting up scale2a_branch2a +I0111 18:29:32.401125 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.401130 12987 net.cpp:165] Memory required for data: 892731904 +I0111 18:29:32.401144 12987 layer_factory.hpp:77] Creating layer res2a_branch2a_relu +I0111 
18:29:32.401156 12987 net.cpp:100] Creating Layer res2a_branch2a_relu +I0111 18:29:32.401163 12987 net.cpp:434] res2a_branch2a_relu <- res2a_branch2a +I0111 18:29:32.401173 12987 net.cpp:395] res2a_branch2a_relu -> res2a_branch2a (in-place) +I0111 18:29:32.402529 12987 net.cpp:150] Setting up res2a_branch2a_relu +I0111 18:29:32.402554 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.402561 12987 net.cpp:165] Memory required for data: 918422016 +I0111 18:29:32.402567 12987 layer_factory.hpp:77] Creating layer res2a_branch2b +I0111 18:29:32.402582 12987 net.cpp:100] Creating Layer res2a_branch2b +I0111 18:29:32.402591 12987 net.cpp:434] res2a_branch2b <- res2a_branch2a +I0111 18:29:32.402601 12987 net.cpp:408] res2a_branch2b -> res2a_branch2b +I0111 18:29:32.407563 12987 net.cpp:150] Setting up res2a_branch2b +I0111 18:29:32.407589 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.407598 12987 net.cpp:165] Memory required for data: 944112128 +I0111 18:29:32.407608 12987 layer_factory.hpp:77] Creating layer bn2a_branch2b +I0111 18:29:32.407621 12987 net.cpp:100] Creating Layer bn2a_branch2b +I0111 18:29:32.407630 12987 net.cpp:434] bn2a_branch2b <- res2a_branch2b +I0111 18:29:32.407639 12987 net.cpp:395] bn2a_branch2b -> res2a_branch2b (in-place) +I0111 18:29:32.407949 12987 net.cpp:150] Setting up bn2a_branch2b +I0111 18:29:32.407974 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.407981 12987 net.cpp:165] Memory required for data: 969802240 +I0111 18:29:32.407994 12987 layer_factory.hpp:77] Creating layer scale2a_branch2b +I0111 18:29:32.408004 12987 net.cpp:100] Creating Layer scale2a_branch2b +I0111 18:29:32.408012 12987 net.cpp:434] scale2a_branch2b <- res2a_branch2b +I0111 18:29:32.408023 12987 net.cpp:395] scale2a_branch2b -> res2a_branch2b (in-place) +I0111 18:29:32.408088 12987 layer_factory.hpp:77] Creating layer scale2a_branch2b +I0111 18:29:32.408274 12987 net.cpp:150] Setting up scale2a_branch2b +I0111 18:29:32.408288 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.408293 12987 net.cpp:165] Memory required for data: 995492352 +I0111 18:29:32.408303 12987 layer_factory.hpp:77] Creating layer res2a_branch2b_relu +I0111 18:29:32.408313 12987 net.cpp:100] Creating Layer res2a_branch2b_relu +I0111 18:29:32.408321 12987 net.cpp:434] res2a_branch2b_relu <- res2a_branch2b +I0111 18:29:32.408329 12987 net.cpp:395] res2a_branch2b_relu -> res2a_branch2b (in-place) +I0111 18:29:32.408615 12987 net.cpp:150] Setting up res2a_branch2b_relu +I0111 18:29:32.408632 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.408638 12987 net.cpp:165] Memory required for data: 1021182464 +I0111 18:29:32.408643 12987 layer_factory.hpp:77] Creating layer res2a_branch2c +I0111 18:29:32.408656 12987 net.cpp:100] Creating Layer res2a_branch2c +I0111 18:29:32.408663 12987 net.cpp:434] res2a_branch2c <- res2a_branch2b +I0111 18:29:32.408673 12987 net.cpp:408] res2a_branch2c -> res2a_branch2c +I0111 18:29:32.412084 12987 net.cpp:150] Setting up res2a_branch2c +I0111 18:29:32.412109 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.412122 12987 net.cpp:165] Memory required for data: 1123942912 +I0111 18:29:32.412130 12987 layer_factory.hpp:77] Creating layer bn2a_branch2c +I0111 18:29:32.412143 12987 net.cpp:100] Creating Layer bn2a_branch2c +I0111 18:29:32.412152 12987 net.cpp:434] bn2a_branch2c <- res2a_branch2c +I0111 18:29:32.412159 12987 net.cpp:395] bn2a_branch2c -> res2a_branch2c (in-place) +I0111 
18:29:32.412456 12987 net.cpp:150] Setting up bn2a_branch2c +I0111 18:29:32.412469 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.412477 12987 net.cpp:165] Memory required for data: 1226703360 +I0111 18:29:32.412488 12987 layer_factory.hpp:77] Creating layer scale2a_branch2c +I0111 18:29:32.412500 12987 net.cpp:100] Creating Layer scale2a_branch2c +I0111 18:29:32.412508 12987 net.cpp:434] scale2a_branch2c <- res2a_branch2c +I0111 18:29:32.412514 12987 net.cpp:395] scale2a_branch2c -> res2a_branch2c (in-place) +I0111 18:29:32.412575 12987 layer_factory.hpp:77] Creating layer scale2a_branch2c +I0111 18:29:32.412747 12987 net.cpp:150] Setting up scale2a_branch2c +I0111 18:29:32.412760 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.412765 12987 net.cpp:165] Memory required for data: 1329463808 +I0111 18:29:32.412776 12987 layer_factory.hpp:77] Creating layer res2a +I0111 18:29:32.412793 12987 net.cpp:100] Creating Layer res2a +I0111 18:29:32.412801 12987 net.cpp:434] res2a <- res2a_branch1 +I0111 18:29:32.412808 12987 net.cpp:434] res2a <- res2a_branch2c +I0111 18:29:32.412816 12987 net.cpp:408] res2a -> res2a +I0111 18:29:32.412865 12987 net.cpp:150] Setting up res2a +I0111 18:29:32.412878 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.412885 12987 net.cpp:165] Memory required for data: 1432224256 +I0111 18:29:32.412890 12987 layer_factory.hpp:77] Creating layer res2a_relu +I0111 18:29:32.412899 12987 net.cpp:100] Creating Layer res2a_relu +I0111 18:29:32.412904 12987 net.cpp:434] res2a_relu <- res2a +I0111 18:29:32.412910 12987 net.cpp:395] res2a_relu -> res2a (in-place) +I0111 18:29:32.414528 12987 net.cpp:150] Setting up res2a_relu +I0111 18:29:32.414582 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.414593 12987 net.cpp:165] Memory required for data: 1534984704 +I0111 18:29:32.414604 12987 layer_factory.hpp:77] Creating layer res2a_res2a_relu_0_split +I0111 18:29:32.414650 12987 net.cpp:100] Creating Layer res2a_res2a_relu_0_split +I0111 18:29:32.414664 12987 net.cpp:434] res2a_res2a_relu_0_split <- res2a +I0111 18:29:32.414702 12987 net.cpp:408] res2a_res2a_relu_0_split -> res2a_res2a_relu_0_split_0 +I0111 18:29:32.414726 12987 net.cpp:408] res2a_res2a_relu_0_split -> res2a_res2a_relu_0_split_1 +I0111 18:29:32.414847 12987 net.cpp:150] Setting up res2a_res2a_relu_0_split +I0111 18:29:32.414865 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.414875 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.414881 12987 net.cpp:165] Memory required for data: 1740505600 +I0111 18:29:32.414888 12987 layer_factory.hpp:77] Creating layer res2b_branch2a +I0111 18:29:32.414913 12987 net.cpp:100] Creating Layer res2b_branch2a +I0111 18:29:32.414923 12987 net.cpp:434] res2b_branch2a <- res2a_res2a_relu_0_split_0 +I0111 18:29:32.414935 12987 net.cpp:408] res2b_branch2a -> res2b_branch2a +I0111 18:29:32.419421 12987 net.cpp:150] Setting up res2b_branch2a +I0111 18:29:32.419455 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.419466 12987 net.cpp:165] Memory required for data: 1766195712 +I0111 18:29:32.419493 12987 layer_factory.hpp:77] Creating layer bn2b_branch2a +I0111 18:29:32.419512 12987 net.cpp:100] Creating Layer bn2b_branch2a +I0111 18:29:32.419523 12987 net.cpp:434] bn2b_branch2a <- res2b_branch2a +I0111 18:29:32.419535 12987 net.cpp:395] bn2b_branch2a -> res2b_branch2a (in-place) +I0111 18:29:32.419950 12987 net.cpp:150] Setting up bn2b_branch2a 
+I0111 18:29:32.419967 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.419978 12987 net.cpp:165] Memory required for data: 1791885824 +I0111 18:29:32.420006 12987 layer_factory.hpp:77] Creating layer scale2b_branch2a +I0111 18:29:32.420023 12987 net.cpp:100] Creating Layer scale2b_branch2a +I0111 18:29:32.420033 12987 net.cpp:434] scale2b_branch2a <- res2b_branch2a +I0111 18:29:32.420044 12987 net.cpp:395] scale2b_branch2a -> res2b_branch2a (in-place) +I0111 18:29:32.420136 12987 layer_factory.hpp:77] Creating layer scale2b_branch2a +I0111 18:29:32.420389 12987 net.cpp:150] Setting up scale2b_branch2a +I0111 18:29:32.420404 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.420413 12987 net.cpp:165] Memory required for data: 1817575936 +I0111 18:29:32.420426 12987 layer_factory.hpp:77] Creating layer res2b_branch2a_relu +I0111 18:29:32.420439 12987 net.cpp:100] Creating Layer res2b_branch2a_relu +I0111 18:29:32.420449 12987 net.cpp:434] res2b_branch2a_relu <- res2b_branch2a +I0111 18:29:32.420464 12987 net.cpp:395] res2b_branch2a_relu -> res2b_branch2a (in-place) +I0111 18:29:32.422087 12987 net.cpp:150] Setting up res2b_branch2a_relu +I0111 18:29:32.422116 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.422124 12987 net.cpp:165] Memory required for data: 1843266048 +I0111 18:29:32.422132 12987 layer_factory.hpp:77] Creating layer res2b_branch2b +I0111 18:29:32.422152 12987 net.cpp:100] Creating Layer res2b_branch2b +I0111 18:29:32.422163 12987 net.cpp:434] res2b_branch2b <- res2b_branch2a +I0111 18:29:32.422178 12987 net.cpp:408] res2b_branch2b -> res2b_branch2b +I0111 18:29:32.427004 12987 net.cpp:150] Setting up res2b_branch2b +I0111 18:29:32.427036 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.427044 12987 net.cpp:165] Memory required for data: 1868956160 +I0111 18:29:32.427065 12987 layer_factory.hpp:77] Creating layer bn2b_branch2b +I0111 18:29:32.427081 12987 net.cpp:100] Creating Layer bn2b_branch2b +I0111 18:29:32.427090 12987 net.cpp:434] bn2b_branch2b <- res2b_branch2b +I0111 18:29:32.427103 12987 net.cpp:395] bn2b_branch2b -> res2b_branch2b (in-place) +I0111 18:29:32.427498 12987 net.cpp:150] Setting up bn2b_branch2b +I0111 18:29:32.427513 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.427520 12987 net.cpp:165] Memory required for data: 1894646272 +I0111 18:29:32.427536 12987 layer_factory.hpp:77] Creating layer scale2b_branch2b +I0111 18:29:32.427548 12987 net.cpp:100] Creating Layer scale2b_branch2b +I0111 18:29:32.427567 12987 net.cpp:434] scale2b_branch2b <- res2b_branch2b +I0111 18:29:32.427577 12987 net.cpp:395] scale2b_branch2b -> res2b_branch2b (in-place) +I0111 18:29:32.427656 12987 layer_factory.hpp:77] Creating layer scale2b_branch2b +I0111 18:29:32.427914 12987 net.cpp:150] Setting up scale2b_branch2b +I0111 18:29:32.427930 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.427937 12987 net.cpp:165] Memory required for data: 1920336384 +I0111 18:29:32.427948 12987 layer_factory.hpp:77] Creating layer res2b_branch2b_relu +I0111 18:29:32.427974 12987 net.cpp:100] Creating Layer res2b_branch2b_relu +I0111 18:29:32.427983 12987 net.cpp:434] res2b_branch2b_relu <- res2b_branch2b +I0111 18:29:32.427992 12987 net.cpp:395] res2b_branch2b_relu -> res2b_branch2b (in-place) +I0111 18:29:32.428356 12987 net.cpp:150] Setting up res2b_branch2b_relu +I0111 18:29:32.428375 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.428382 12987 net.cpp:165] 
Memory required for data: 1946026496 +I0111 18:29:32.428390 12987 layer_factory.hpp:77] Creating layer res2b_branch2c +I0111 18:29:32.428406 12987 net.cpp:100] Creating Layer res2b_branch2c +I0111 18:29:32.428414 12987 net.cpp:434] res2b_branch2c <- res2b_branch2b +I0111 18:29:32.428426 12987 net.cpp:408] res2b_branch2c -> res2b_branch2c +I0111 18:29:32.432488 12987 net.cpp:150] Setting up res2b_branch2c +I0111 18:29:32.432515 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.432524 12987 net.cpp:165] Memory required for data: 2048786944 +I0111 18:29:32.432545 12987 layer_factory.hpp:77] Creating layer bn2b_branch2c +I0111 18:29:32.432562 12987 net.cpp:100] Creating Layer bn2b_branch2c +I0111 18:29:32.432571 12987 net.cpp:434] bn2b_branch2c <- res2b_branch2c +I0111 18:29:32.432581 12987 net.cpp:395] bn2b_branch2c -> res2b_branch2c (in-place) +I0111 18:29:32.432973 12987 net.cpp:150] Setting up bn2b_branch2c +I0111 18:29:32.432988 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.432996 12987 net.cpp:165] Memory required for data: 2151547392 +I0111 18:29:32.433010 12987 layer_factory.hpp:77] Creating layer scale2b_branch2c +I0111 18:29:32.433022 12987 net.cpp:100] Creating Layer scale2b_branch2c +I0111 18:29:32.433029 12987 net.cpp:434] scale2b_branch2c <- res2b_branch2c +I0111 18:29:32.433043 12987 net.cpp:395] scale2b_branch2c -> res2b_branch2c (in-place) +I0111 18:29:32.433116 12987 layer_factory.hpp:77] Creating layer scale2b_branch2c +I0111 18:29:32.433342 12987 net.cpp:150] Setting up scale2b_branch2c +I0111 18:29:32.433358 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.433367 12987 net.cpp:165] Memory required for data: 2254307840 +I0111 18:29:32.433377 12987 layer_factory.hpp:77] Creating layer res2b +I0111 18:29:32.433388 12987 net.cpp:100] Creating Layer res2b +I0111 18:29:32.433395 12987 net.cpp:434] res2b <- res2a_res2a_relu_0_split_1 +I0111 18:29:32.433405 12987 net.cpp:434] res2b <- res2b_branch2c +I0111 18:29:32.433414 12987 net.cpp:408] res2b -> res2b +I0111 18:29:32.433465 12987 net.cpp:150] Setting up res2b +I0111 18:29:32.433478 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.433483 12987 net.cpp:165] Memory required for data: 2357068288 +I0111 18:29:32.433490 12987 layer_factory.hpp:77] Creating layer res2b_relu +I0111 18:29:32.433501 12987 net.cpp:100] Creating Layer res2b_relu +I0111 18:29:32.433509 12987 net.cpp:434] res2b_relu <- res2b +I0111 18:29:32.433524 12987 net.cpp:395] res2b_relu -> res2b (in-place) +I0111 18:29:32.435026 12987 net.cpp:150] Setting up res2b_relu +I0111 18:29:32.435050 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.435060 12987 net.cpp:165] Memory required for data: 2459828736 +I0111 18:29:32.435076 12987 layer_factory.hpp:77] Creating layer res2b_res2b_relu_0_split +I0111 18:29:32.435093 12987 net.cpp:100] Creating Layer res2b_res2b_relu_0_split +I0111 18:29:32.435102 12987 net.cpp:434] res2b_res2b_relu_0_split <- res2b +I0111 18:29:32.435113 12987 net.cpp:408] res2b_res2b_relu_0_split -> res2b_res2b_relu_0_split_0 +I0111 18:29:32.435140 12987 net.cpp:408] res2b_res2b_relu_0_split -> res2b_res2b_relu_0_split_1 +I0111 18:29:32.435240 12987 net.cpp:150] Setting up res2b_res2b_relu_0_split +I0111 18:29:32.435257 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.435267 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.435272 12987 net.cpp:165] Memory required for data: 2665349632 +I0111 
18:29:32.435278 12987 layer_factory.hpp:77] Creating layer res2c_branch2a +I0111 18:29:32.435295 12987 net.cpp:100] Creating Layer res2c_branch2a +I0111 18:29:32.435307 12987 net.cpp:434] res2c_branch2a <- res2b_res2b_relu_0_split_0 +I0111 18:29:32.435319 12987 net.cpp:408] res2c_branch2a -> res2c_branch2a +I0111 18:29:32.438247 12987 net.cpp:150] Setting up res2c_branch2a +I0111 18:29:32.438277 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.438285 12987 net.cpp:165] Memory required for data: 2691039744 +I0111 18:29:32.438297 12987 layer_factory.hpp:77] Creating layer bn2c_branch2a +I0111 18:29:32.438310 12987 net.cpp:100] Creating Layer bn2c_branch2a +I0111 18:29:32.438318 12987 net.cpp:434] bn2c_branch2a <- res2c_branch2a +I0111 18:29:32.438328 12987 net.cpp:395] bn2c_branch2a -> res2c_branch2a (in-place) +I0111 18:29:32.438730 12987 net.cpp:150] Setting up bn2c_branch2a +I0111 18:29:32.438743 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.438750 12987 net.cpp:165] Memory required for data: 2716729856 +I0111 18:29:32.438762 12987 layer_factory.hpp:77] Creating layer scale2c_branch2a +I0111 18:29:32.438786 12987 net.cpp:100] Creating Layer scale2c_branch2a +I0111 18:29:32.438796 12987 net.cpp:434] scale2c_branch2a <- res2c_branch2a +I0111 18:29:32.438803 12987 net.cpp:395] scale2c_branch2a -> res2c_branch2a (in-place) +I0111 18:29:32.438886 12987 layer_factory.hpp:77] Creating layer scale2c_branch2a +I0111 18:29:32.439129 12987 net.cpp:150] Setting up scale2c_branch2a +I0111 18:29:32.439144 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.439151 12987 net.cpp:165] Memory required for data: 2742419968 +I0111 18:29:32.439163 12987 layer_factory.hpp:77] Creating layer res2c_branch2a_relu +I0111 18:29:32.439175 12987 net.cpp:100] Creating Layer res2c_branch2a_relu +I0111 18:29:32.439182 12987 net.cpp:434] res2c_branch2a_relu <- res2c_branch2a +I0111 18:29:32.439198 12987 net.cpp:395] res2c_branch2a_relu -> res2c_branch2a (in-place) +I0111 18:29:32.440683 12987 net.cpp:150] Setting up res2c_branch2a_relu +I0111 18:29:32.440708 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.440714 12987 net.cpp:165] Memory required for data: 2768110080 +I0111 18:29:32.440721 12987 layer_factory.hpp:77] Creating layer res2c_branch2b +I0111 18:29:32.440739 12987 net.cpp:100] Creating Layer res2c_branch2b +I0111 18:29:32.440748 12987 net.cpp:434] res2c_branch2b <- res2c_branch2a +I0111 18:29:32.440759 12987 net.cpp:408] res2c_branch2b -> res2c_branch2b +I0111 18:29:32.445277 12987 net.cpp:150] Setting up res2c_branch2b +I0111 18:29:32.445315 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.445327 12987 net.cpp:165] Memory required for data: 2793800192 +I0111 18:29:32.445336 12987 layer_factory.hpp:77] Creating layer bn2c_branch2b +I0111 18:29:32.445350 12987 net.cpp:100] Creating Layer bn2c_branch2b +I0111 18:29:32.445359 12987 net.cpp:434] bn2c_branch2b <- res2c_branch2b +I0111 18:29:32.445369 12987 net.cpp:395] bn2c_branch2b -> res2c_branch2b (in-place) +I0111 18:29:32.445737 12987 net.cpp:150] Setting up bn2c_branch2b +I0111 18:29:32.445751 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.445756 12987 net.cpp:165] Memory required for data: 2819490304 +I0111 18:29:32.445766 12987 layer_factory.hpp:77] Creating layer scale2c_branch2b +I0111 18:29:32.445791 12987 net.cpp:100] Creating Layer scale2c_branch2b +I0111 18:29:32.445797 12987 net.cpp:434] scale2c_branch2b <- res2c_branch2b +I0111 
18:29:32.445806 12987 net.cpp:395] scale2c_branch2b -> res2c_branch2b (in-place) +I0111 18:29:32.445890 12987 layer_factory.hpp:77] Creating layer scale2c_branch2b +I0111 18:29:32.446120 12987 net.cpp:150] Setting up scale2c_branch2b +I0111 18:29:32.446142 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.446147 12987 net.cpp:165] Memory required for data: 2845180416 +I0111 18:29:32.446157 12987 layer_factory.hpp:77] Creating layer res2c_branch2b_relu +I0111 18:29:32.446168 12987 net.cpp:100] Creating Layer res2c_branch2b_relu +I0111 18:29:32.446173 12987 net.cpp:434] res2c_branch2b_relu <- res2c_branch2b +I0111 18:29:32.446180 12987 net.cpp:395] res2c_branch2b_relu -> res2c_branch2b (in-place) +I0111 18:29:32.446513 12987 net.cpp:150] Setting up res2c_branch2b_relu +I0111 18:29:32.446530 12987 net.cpp:157] Top shape: 32 64 56 56 (6422528) +I0111 18:29:32.446535 12987 net.cpp:165] Memory required for data: 2870870528 +I0111 18:29:32.446540 12987 layer_factory.hpp:77] Creating layer res2c_branch2c +I0111 18:29:32.446557 12987 net.cpp:100] Creating Layer res2c_branch2c +I0111 18:29:32.446563 12987 net.cpp:434] res2c_branch2c <- res2c_branch2b +I0111 18:29:32.446574 12987 net.cpp:408] res2c_branch2c -> res2c_branch2c +I0111 18:29:32.451428 12987 net.cpp:150] Setting up res2c_branch2c +I0111 18:29:32.451454 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.451460 12987 net.cpp:165] Memory required for data: 2973630976 +I0111 18:29:32.451469 12987 layer_factory.hpp:77] Creating layer bn2c_branch2c +I0111 18:29:32.451483 12987 net.cpp:100] Creating Layer bn2c_branch2c +I0111 18:29:32.451488 12987 net.cpp:434] bn2c_branch2c <- res2c_branch2c +I0111 18:29:32.451508 12987 net.cpp:395] bn2c_branch2c -> res2c_branch2c (in-place) +I0111 18:29:32.451882 12987 net.cpp:150] Setting up bn2c_branch2c +I0111 18:29:32.451900 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.451903 12987 net.cpp:165] Memory required for data: 3076391424 +I0111 18:29:32.451930 12987 layer_factory.hpp:77] Creating layer scale2c_branch2c +I0111 18:29:32.451953 12987 net.cpp:100] Creating Layer scale2c_branch2c +I0111 18:29:32.451962 12987 net.cpp:434] scale2c_branch2c <- res2c_branch2c +I0111 18:29:32.451972 12987 net.cpp:395] scale2c_branch2c -> res2c_branch2c (in-place) +I0111 18:29:32.452050 12987 layer_factory.hpp:77] Creating layer scale2c_branch2c +I0111 18:29:32.452265 12987 net.cpp:150] Setting up scale2c_branch2c +I0111 18:29:32.452280 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.452286 12987 net.cpp:165] Memory required for data: 3179151872 +I0111 18:29:32.452296 12987 layer_factory.hpp:77] Creating layer res2c +I0111 18:29:32.452306 12987 net.cpp:100] Creating Layer res2c +I0111 18:29:32.452312 12987 net.cpp:434] res2c <- res2b_res2b_relu_0_split_1 +I0111 18:29:32.452320 12987 net.cpp:434] res2c <- res2c_branch2c +I0111 18:29:32.452332 12987 net.cpp:408] res2c -> res2c +I0111 18:29:32.452376 12987 net.cpp:150] Setting up res2c +I0111 18:29:32.452389 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.452394 12987 net.cpp:165] Memory required for data: 3281912320 +I0111 18:29:32.452399 12987 layer_factory.hpp:77] Creating layer res2c_relu +I0111 18:29:32.452406 12987 net.cpp:100] Creating Layer res2c_relu +I0111 18:29:32.452415 12987 net.cpp:434] res2c_relu <- res2c +I0111 18:29:32.452424 12987 net.cpp:395] res2c_relu -> res2c (in-place) +I0111 18:29:32.453862 12987 net.cpp:150] Setting up res2c_relu +I0111 
18:29:32.453886 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.453891 12987 net.cpp:165] Memory required for data: 3384672768 +I0111 18:29:32.453896 12987 layer_factory.hpp:77] Creating layer res2c_res2c_relu_0_split +I0111 18:29:32.453907 12987 net.cpp:100] Creating Layer res2c_res2c_relu_0_split +I0111 18:29:32.453913 12987 net.cpp:434] res2c_res2c_relu_0_split <- res2c +I0111 18:29:32.453927 12987 net.cpp:408] res2c_res2c_relu_0_split -> res2c_res2c_relu_0_split_0 +I0111 18:29:32.453943 12987 net.cpp:408] res2c_res2c_relu_0_split -> res2c_res2c_relu_0_split_1 +I0111 18:29:32.454025 12987 net.cpp:150] Setting up res2c_res2c_relu_0_split +I0111 18:29:32.454037 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.454046 12987 net.cpp:157] Top shape: 32 256 56 56 (25690112) +I0111 18:29:32.454061 12987 net.cpp:165] Memory required for data: 3590193664 +I0111 18:29:32.454078 12987 layer_factory.hpp:77] Creating layer res3a_branch1 +I0111 18:29:32.454097 12987 net.cpp:100] Creating Layer res3a_branch1 +I0111 18:29:32.454107 12987 net.cpp:434] res3a_branch1 <- res2c_res2c_relu_0_split_0 +I0111 18:29:32.454116 12987 net.cpp:408] res3a_branch1 -> res3a_branch1 +I0111 18:29:32.459952 12987 net.cpp:150] Setting up res3a_branch1 +I0111 18:29:32.459977 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.459983 12987 net.cpp:165] Memory required for data: 3641573888 +I0111 18:29:32.459992 12987 layer_factory.hpp:77] Creating layer bn3a_branch1 +I0111 18:29:32.460008 12987 net.cpp:100] Creating Layer bn3a_branch1 +I0111 18:29:32.460016 12987 net.cpp:434] bn3a_branch1 <- res3a_branch1 +I0111 18:29:32.460026 12987 net.cpp:395] bn3a_branch1 -> res3a_branch1 (in-place) +I0111 18:29:32.461427 12987 net.cpp:150] Setting up bn3a_branch1 +I0111 18:29:32.461449 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.461458 12987 net.cpp:165] Memory required for data: 3692954112 +I0111 18:29:32.461472 12987 layer_factory.hpp:77] Creating layer scale3a_branch1 +I0111 18:29:32.461486 12987 net.cpp:100] Creating Layer scale3a_branch1 +I0111 18:29:32.461494 12987 net.cpp:434] scale3a_branch1 <- res3a_branch1 +I0111 18:29:32.461503 12987 net.cpp:395] scale3a_branch1 -> res3a_branch1 (in-place) +I0111 18:29:32.461575 12987 layer_factory.hpp:77] Creating layer scale3a_branch1 +I0111 18:29:32.461762 12987 net.cpp:150] Setting up scale3a_branch1 +I0111 18:29:32.461776 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.461781 12987 net.cpp:165] Memory required for data: 3744334336 +I0111 18:29:32.461789 12987 layer_factory.hpp:77] Creating layer res3a_branch2a +I0111 18:29:32.461807 12987 net.cpp:100] Creating Layer res3a_branch2a +I0111 18:29:32.461814 12987 net.cpp:434] res3a_branch2a <- res2c_res2c_relu_0_split_1 +I0111 18:29:32.461827 12987 net.cpp:408] res3a_branch2a -> res3a_branch2a +I0111 18:29:32.466696 12987 net.cpp:150] Setting up res3a_branch2a +I0111 18:29:32.466722 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.466730 12987 net.cpp:165] Memory required for data: 3757179392 +I0111 18:29:32.466748 12987 layer_factory.hpp:77] Creating layer bn3a_branch2a +I0111 18:29:32.466761 12987 net.cpp:100] Creating Layer bn3a_branch2a +I0111 18:29:32.466769 12987 net.cpp:434] bn3a_branch2a <- res3a_branch2a +I0111 18:29:32.466779 12987 net.cpp:395] bn3a_branch2a -> res3a_branch2a (in-place) +I0111 18:29:32.467110 12987 net.cpp:150] Setting up bn3a_branch2a +I0111 18:29:32.467123 12987 net.cpp:157] Top shape: 
32 128 28 28 (3211264) +I0111 18:29:32.467128 12987 net.cpp:165] Memory required for data: 3770024448 +I0111 18:29:32.467139 12987 layer_factory.hpp:77] Creating layer scale3a_branch2a +I0111 18:29:32.467149 12987 net.cpp:100] Creating Layer scale3a_branch2a +I0111 18:29:32.467154 12987 net.cpp:434] scale3a_branch2a <- res3a_branch2a +I0111 18:29:32.467164 12987 net.cpp:395] scale3a_branch2a -> res3a_branch2a (in-place) +I0111 18:29:32.467231 12987 layer_factory.hpp:77] Creating layer scale3a_branch2a +I0111 18:29:32.467411 12987 net.cpp:150] Setting up scale3a_branch2a +I0111 18:29:32.467424 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.467428 12987 net.cpp:165] Memory required for data: 3782869504 +I0111 18:29:32.467437 12987 layer_factory.hpp:77] Creating layer res3a_branch2a_relu +I0111 18:29:32.467447 12987 net.cpp:100] Creating Layer res3a_branch2a_relu +I0111 18:29:32.467453 12987 net.cpp:434] res3a_branch2a_relu <- res3a_branch2a +I0111 18:29:32.467465 12987 net.cpp:395] res3a_branch2a_relu -> res3a_branch2a (in-place) +I0111 18:29:32.469094 12987 net.cpp:150] Setting up res3a_branch2a_relu +I0111 18:29:32.469116 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.469123 12987 net.cpp:165] Memory required for data: 3795714560 +I0111 18:29:32.469127 12987 layer_factory.hpp:77] Creating layer res3a_branch2b +I0111 18:29:32.469156 12987 net.cpp:100] Creating Layer res3a_branch2b +I0111 18:29:32.469166 12987 net.cpp:434] res3a_branch2b <- res3a_branch2a +I0111 18:29:32.469173 12987 net.cpp:408] res3a_branch2b -> res3a_branch2b +I0111 18:29:32.473582 12987 net.cpp:150] Setting up res3a_branch2b +I0111 18:29:32.473603 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.473608 12987 net.cpp:165] Memory required for data: 3808559616 +I0111 18:29:32.473618 12987 layer_factory.hpp:77] Creating layer bn3a_branch2b +I0111 18:29:32.473633 12987 net.cpp:100] Creating Layer bn3a_branch2b +I0111 18:29:32.473639 12987 net.cpp:434] bn3a_branch2b <- res3a_branch2b +I0111 18:29:32.473645 12987 net.cpp:395] bn3a_branch2b -> res3a_branch2b (in-place) +I0111 18:29:32.473884 12987 net.cpp:150] Setting up bn3a_branch2b +I0111 18:29:32.473893 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.473897 12987 net.cpp:165] Memory required for data: 3821404672 +I0111 18:29:32.473906 12987 layer_factory.hpp:77] Creating layer scale3a_branch2b +I0111 18:29:32.473913 12987 net.cpp:100] Creating Layer scale3a_branch2b +I0111 18:29:32.473919 12987 net.cpp:434] scale3a_branch2b <- res3a_branch2b +I0111 18:29:32.473927 12987 net.cpp:395] scale3a_branch2b -> res3a_branch2b (in-place) +I0111 18:29:32.473986 12987 layer_factory.hpp:77] Creating layer scale3a_branch2b +I0111 18:29:32.474123 12987 net.cpp:150] Setting up scale3a_branch2b +I0111 18:29:32.474133 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.474135 12987 net.cpp:165] Memory required for data: 3834249728 +I0111 18:29:32.474143 12987 layer_factory.hpp:77] Creating layer res3a_branch2b_relu +I0111 18:29:32.474158 12987 net.cpp:100] Creating Layer res3a_branch2b_relu +I0111 18:29:32.474165 12987 net.cpp:434] res3a_branch2b_relu <- res3a_branch2b +I0111 18:29:32.474172 12987 net.cpp:395] res3a_branch2b_relu -> res3a_branch2b (in-place) +I0111 18:29:32.475109 12987 net.cpp:150] Setting up res3a_branch2b_relu +I0111 18:29:32.475126 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.475136 12987 net.cpp:165] Memory required for data: 3847094784 +I0111 
18:29:32.475142 12987 layer_factory.hpp:77] Creating layer res3a_branch2c +I0111 18:29:32.475174 12987 net.cpp:100] Creating Layer res3a_branch2c +I0111 18:29:32.475183 12987 net.cpp:434] res3a_branch2c <- res3a_branch2b +I0111 18:29:32.475190 12987 net.cpp:408] res3a_branch2c -> res3a_branch2c +I0111 18:29:32.480370 12987 net.cpp:150] Setting up res3a_branch2c +I0111 18:29:32.480407 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.480423 12987 net.cpp:165] Memory required for data: 3898475008 +I0111 18:29:32.480448 12987 layer_factory.hpp:77] Creating layer bn3a_branch2c +I0111 18:29:32.480466 12987 net.cpp:100] Creating Layer bn3a_branch2c +I0111 18:29:32.480479 12987 net.cpp:434] bn3a_branch2c <- res3a_branch2c +I0111 18:29:32.480501 12987 net.cpp:395] bn3a_branch2c -> res3a_branch2c (in-place) +I0111 18:29:32.480949 12987 net.cpp:150] Setting up bn3a_branch2c +I0111 18:29:32.480967 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.480981 12987 net.cpp:165] Memory required for data: 3949855232 +I0111 18:29:32.481007 12987 layer_factory.hpp:77] Creating layer scale3a_branch2c +I0111 18:29:32.481024 12987 net.cpp:100] Creating Layer scale3a_branch2c +I0111 18:29:32.481036 12987 net.cpp:434] scale3a_branch2c <- res3a_branch2c +I0111 18:29:32.481053 12987 net.cpp:395] scale3a_branch2c -> res3a_branch2c (in-place) +I0111 18:29:32.481161 12987 layer_factory.hpp:77] Creating layer scale3a_branch2c +I0111 18:29:32.481411 12987 net.cpp:150] Setting up scale3a_branch2c +I0111 18:29:32.481431 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.481443 12987 net.cpp:165] Memory required for data: 4001235456 +I0111 18:29:32.481462 12987 layer_factory.hpp:77] Creating layer res3a +I0111 18:29:32.481482 12987 net.cpp:100] Creating Layer res3a +I0111 18:29:32.481493 12987 net.cpp:434] res3a <- res3a_branch1 +I0111 18:29:32.481510 12987 net.cpp:434] res3a <- res3a_branch2c +I0111 18:29:32.481534 12987 net.cpp:408] res3a -> res3a +I0111 18:29:32.481622 12987 net.cpp:150] Setting up res3a +I0111 18:29:32.481642 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.481654 12987 net.cpp:165] Memory required for data: 4052615680 +I0111 18:29:32.481667 12987 layer_factory.hpp:77] Creating layer res3a_relu +I0111 18:29:32.481685 12987 net.cpp:100] Creating Layer res3a_relu +I0111 18:29:32.481698 12987 net.cpp:434] res3a_relu <- res3a +I0111 18:29:32.481716 12987 net.cpp:395] res3a_relu -> res3a (in-place) +I0111 18:29:32.482144 12987 net.cpp:150] Setting up res3a_relu +I0111 18:29:32.482168 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.482185 12987 net.cpp:165] Memory required for data: 4103995904 +I0111 18:29:32.482203 12987 layer_factory.hpp:77] Creating layer res3a_res3a_relu_0_split +I0111 18:29:32.482221 12987 net.cpp:100] Creating Layer res3a_res3a_relu_0_split +I0111 18:29:32.482234 12987 net.cpp:434] res3a_res3a_relu_0_split <- res3a +I0111 18:29:32.482254 12987 net.cpp:408] res3a_res3a_relu_0_split -> res3a_res3a_relu_0_split_0 +I0111 18:29:32.482280 12987 net.cpp:408] res3a_res3a_relu_0_split -> res3a_res3a_relu_0_split_1 +I0111 18:29:32.482379 12987 net.cpp:150] Setting up res3a_res3a_relu_0_split +I0111 18:29:32.482396 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.482414 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.482425 12987 net.cpp:165] Memory required for data: 4206756352 +I0111 18:29:32.482434 12987 layer_factory.hpp:77] Creating layer 
res3b_branch2a +I0111 18:29:32.482465 12987 net.cpp:100] Creating Layer res3b_branch2a +I0111 18:29:32.482475 12987 net.cpp:434] res3b_branch2a <- res3a_res3a_relu_0_split_0 +I0111 18:29:32.482499 12987 net.cpp:408] res3b_branch2a -> res3b_branch2a +I0111 18:29:32.488919 12987 net.cpp:150] Setting up res3b_branch2a +I0111 18:29:32.488955 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.488970 12987 net.cpp:165] Memory required for data: 4219601408 +I0111 18:29:32.488989 12987 layer_factory.hpp:77] Creating layer bn3b_branch2a +I0111 18:29:32.489008 12987 net.cpp:100] Creating Layer bn3b_branch2a +I0111 18:29:32.489019 12987 net.cpp:434] bn3b_branch2a <- res3b_branch2a +I0111 18:29:32.489037 12987 net.cpp:395] bn3b_branch2a -> res3b_branch2a (in-place) +I0111 18:29:32.489449 12987 net.cpp:150] Setting up bn3b_branch2a +I0111 18:29:32.489465 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.489473 12987 net.cpp:165] Memory required for data: 4232446464 +I0111 18:29:32.489486 12987 layer_factory.hpp:77] Creating layer scale3b_branch2a +I0111 18:29:32.489503 12987 net.cpp:100] Creating Layer scale3b_branch2a +I0111 18:29:32.489516 12987 net.cpp:434] scale3b_branch2a <- res3b_branch2a +I0111 18:29:32.489528 12987 net.cpp:395] scale3b_branch2a -> res3b_branch2a (in-place) +I0111 18:29:32.489619 12987 layer_factory.hpp:77] Creating layer scale3b_branch2a +I0111 18:29:32.489853 12987 net.cpp:150] Setting up scale3b_branch2a +I0111 18:29:32.489869 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.489877 12987 net.cpp:165] Memory required for data: 4245291520 +I0111 18:29:32.489890 12987 layer_factory.hpp:77] Creating layer res3b_branch2a_relu +I0111 18:29:32.489903 12987 net.cpp:100] Creating Layer res3b_branch2a_relu +I0111 18:29:32.489915 12987 net.cpp:434] res3b_branch2a_relu <- res3b_branch2a +I0111 18:29:32.489936 12987 net.cpp:395] res3b_branch2a_relu -> res3b_branch2a (in-place) +I0111 18:29:32.491436 12987 net.cpp:150] Setting up res3b_branch2a_relu +I0111 18:29:32.491464 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.491478 12987 net.cpp:165] Memory required for data: 4258136576 +I0111 18:29:32.491492 12987 layer_factory.hpp:77] Creating layer res3b_branch2b +I0111 18:29:32.491520 12987 net.cpp:100] Creating Layer res3b_branch2b +I0111 18:29:32.491531 12987 net.cpp:434] res3b_branch2b <- res3b_branch2a +I0111 18:29:32.491549 12987 net.cpp:408] res3b_branch2b -> res3b_branch2b +I0111 18:29:32.498271 12987 net.cpp:150] Setting up res3b_branch2b +I0111 18:29:32.498301 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.498325 12987 net.cpp:165] Memory required for data: 4270981632 +I0111 18:29:32.498344 12987 layer_factory.hpp:77] Creating layer bn3b_branch2b +I0111 18:29:32.498366 12987 net.cpp:100] Creating Layer bn3b_branch2b +I0111 18:29:32.498376 12987 net.cpp:434] bn3b_branch2b <- res3b_branch2b +I0111 18:29:32.498392 12987 net.cpp:395] bn3b_branch2b -> res3b_branch2b (in-place) +I0111 18:29:32.498785 12987 net.cpp:150] Setting up bn3b_branch2b +I0111 18:29:32.498801 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.498809 12987 net.cpp:165] Memory required for data: 4283826688 +I0111 18:29:32.498821 12987 layer_factory.hpp:77] Creating layer scale3b_branch2b +I0111 18:29:32.498834 12987 net.cpp:100] Creating Layer scale3b_branch2b +I0111 18:29:32.498847 12987 net.cpp:434] scale3b_branch2b <- res3b_branch2b +I0111 18:29:32.498862 12987 net.cpp:395] scale3b_branch2b -> 
res3b_branch2b (in-place) +I0111 18:29:32.498944 12987 layer_factory.hpp:77] Creating layer scale3b_branch2b +I0111 18:29:32.499157 12987 net.cpp:150] Setting up scale3b_branch2b +I0111 18:29:32.499174 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.499181 12987 net.cpp:165] Memory required for data: 4296671744 +I0111 18:29:32.499193 12987 layer_factory.hpp:77] Creating layer res3b_branch2b_relu +I0111 18:29:32.499204 12987 net.cpp:100] Creating Layer res3b_branch2b_relu +I0111 18:29:32.499214 12987 net.cpp:434] res3b_branch2b_relu <- res3b_branch2b +I0111 18:29:32.499222 12987 net.cpp:395] res3b_branch2b_relu -> res3b_branch2b (in-place) +I0111 18:29:32.500701 12987 net.cpp:150] Setting up res3b_branch2b_relu +I0111 18:29:32.500727 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.500735 12987 net.cpp:165] Memory required for data: 4309516800 +I0111 18:29:32.500741 12987 layer_factory.hpp:77] Creating layer res3b_branch2c +I0111 18:29:32.500762 12987 net.cpp:100] Creating Layer res3b_branch2c +I0111 18:29:32.500771 12987 net.cpp:434] res3b_branch2c <- res3b_branch2b +I0111 18:29:32.500782 12987 net.cpp:408] res3b_branch2c -> res3b_branch2c +I0111 18:29:32.506614 12987 net.cpp:150] Setting up res3b_branch2c +I0111 18:29:32.506654 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.506661 12987 net.cpp:165] Memory required for data: 4360897024 +I0111 18:29:32.506674 12987 layer_factory.hpp:77] Creating layer bn3b_branch2c +I0111 18:29:32.506690 12987 net.cpp:100] Creating Layer bn3b_branch2c +I0111 18:29:32.506703 12987 net.cpp:434] bn3b_branch2c <- res3b_branch2c +I0111 18:29:32.506717 12987 net.cpp:395] bn3b_branch2c -> res3b_branch2c (in-place) +I0111 18:29:32.507176 12987 net.cpp:150] Setting up bn3b_branch2c +I0111 18:29:32.507194 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.507201 12987 net.cpp:165] Memory required for data: 4412277248 +I0111 18:29:32.507216 12987 layer_factory.hpp:77] Creating layer scale3b_branch2c +I0111 18:29:32.507244 12987 net.cpp:100] Creating Layer scale3b_branch2c +I0111 18:29:32.507254 12987 net.cpp:434] scale3b_branch2c <- res3b_branch2c +I0111 18:29:32.507267 12987 net.cpp:395] scale3b_branch2c -> res3b_branch2c (in-place) +I0111 18:29:32.507366 12987 layer_factory.hpp:77] Creating layer scale3b_branch2c +I0111 18:29:32.507616 12987 net.cpp:150] Setting up scale3b_branch2c +I0111 18:29:32.507635 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.507643 12987 net.cpp:165] Memory required for data: 4463657472 +I0111 18:29:32.507655 12987 layer_factory.hpp:77] Creating layer res3b +I0111 18:29:32.507668 12987 net.cpp:100] Creating Layer res3b +I0111 18:29:32.507678 12987 net.cpp:434] res3b <- res3a_res3a_relu_0_split_1 +I0111 18:29:32.507686 12987 net.cpp:434] res3b <- res3b_branch2c +I0111 18:29:32.507699 12987 net.cpp:408] res3b -> res3b +I0111 18:29:32.507756 12987 net.cpp:150] Setting up res3b +I0111 18:29:32.507771 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.507776 12987 net.cpp:165] Memory required for data: 4515037696 +I0111 18:29:32.507782 12987 layer_factory.hpp:77] Creating layer res3b_relu +I0111 18:29:32.507809 12987 net.cpp:100] Creating Layer res3b_relu +I0111 18:29:32.507817 12987 net.cpp:434] res3b_relu <- res3b +I0111 18:29:32.507825 12987 net.cpp:395] res3b_relu -> res3b (in-place) +I0111 18:29:32.509487 12987 net.cpp:150] Setting up res3b_relu +I0111 18:29:32.509517 12987 net.cpp:157] Top shape: 32 512 28 28 
(12845056) +I0111 18:29:32.509526 12987 net.cpp:165] Memory required for data: 4566417920 +I0111 18:29:32.509533 12987 layer_factory.hpp:77] Creating layer res3b_res3b_relu_0_split +I0111 18:29:32.509552 12987 net.cpp:100] Creating Layer res3b_res3b_relu_0_split +I0111 18:29:32.509562 12987 net.cpp:434] res3b_res3b_relu_0_split <- res3b +I0111 18:29:32.509572 12987 net.cpp:408] res3b_res3b_relu_0_split -> res3b_res3b_relu_0_split_0 +I0111 18:29:32.509599 12987 net.cpp:408] res3b_res3b_relu_0_split -> res3b_res3b_relu_0_split_1 +I0111 18:29:32.509704 12987 net.cpp:150] Setting up res3b_res3b_relu_0_split +I0111 18:29:32.509721 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.509732 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.509737 12987 net.cpp:165] Memory required for data: 4669178368 +I0111 18:29:32.509742 12987 layer_factory.hpp:77] Creating layer res3c_branch2a +I0111 18:29:32.509776 12987 net.cpp:100] Creating Layer res3c_branch2a +I0111 18:29:32.509786 12987 net.cpp:434] res3c_branch2a <- res3b_res3b_relu_0_split_0 +I0111 18:29:32.509802 12987 net.cpp:408] res3c_branch2a -> res3c_branch2a +I0111 18:29:32.515560 12987 net.cpp:150] Setting up res3c_branch2a +I0111 18:29:32.515594 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.515602 12987 net.cpp:165] Memory required for data: 4682023424 +I0111 18:29:32.515614 12987 layer_factory.hpp:77] Creating layer bn3c_branch2a +I0111 18:29:32.515628 12987 net.cpp:100] Creating Layer bn3c_branch2a +I0111 18:29:32.515637 12987 net.cpp:434] bn3c_branch2a <- res3c_branch2a +I0111 18:29:32.515650 12987 net.cpp:395] bn3c_branch2a -> res3c_branch2a (in-place) +I0111 18:29:32.516080 12987 net.cpp:150] Setting up bn3c_branch2a +I0111 18:29:32.516096 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.516101 12987 net.cpp:165] Memory required for data: 4694868480 +I0111 18:29:32.516114 12987 layer_factory.hpp:77] Creating layer scale3c_branch2a +I0111 18:29:32.516130 12987 net.cpp:100] Creating Layer scale3c_branch2a +I0111 18:29:32.516139 12987 net.cpp:434] scale3c_branch2a <- res3c_branch2a +I0111 18:29:32.516147 12987 net.cpp:395] scale3c_branch2a -> res3c_branch2a (in-place) +I0111 18:29:32.516235 12987 layer_factory.hpp:77] Creating layer scale3c_branch2a +I0111 18:29:32.516477 12987 net.cpp:150] Setting up scale3c_branch2a +I0111 18:29:32.516494 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.516499 12987 net.cpp:165] Memory required for data: 4707713536 +I0111 18:29:32.516508 12987 layer_factory.hpp:77] Creating layer res3c_branch2a_relu +I0111 18:29:32.516520 12987 net.cpp:100] Creating Layer res3c_branch2a_relu +I0111 18:29:32.516528 12987 net.cpp:434] res3c_branch2a_relu <- res3c_branch2a +I0111 18:29:32.516540 12987 net.cpp:395] res3c_branch2a_relu -> res3c_branch2a (in-place) +I0111 18:29:32.518084 12987 net.cpp:150] Setting up res3c_branch2a_relu +I0111 18:29:32.518110 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.518118 12987 net.cpp:165] Memory required for data: 4720558592 +I0111 18:29:32.518124 12987 layer_factory.hpp:77] Creating layer res3c_branch2b +I0111 18:29:32.518158 12987 net.cpp:100] Creating Layer res3c_branch2b +I0111 18:29:32.518168 12987 net.cpp:434] res3c_branch2b <- res3c_branch2a +I0111 18:29:32.518182 12987 net.cpp:408] res3c_branch2b -> res3c_branch2b +I0111 18:29:32.525202 12987 net.cpp:150] Setting up res3c_branch2b +I0111 18:29:32.525244 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) 
+I0111 18:29:32.525254 12987 net.cpp:165] Memory required for data: 4733403648 +I0111 18:29:32.525265 12987 layer_factory.hpp:77] Creating layer bn3c_branch2b +I0111 18:29:32.525284 12987 net.cpp:100] Creating Layer bn3c_branch2b +I0111 18:29:32.525305 12987 net.cpp:434] bn3c_branch2b <- res3c_branch2b +I0111 18:29:32.525317 12987 net.cpp:395] bn3c_branch2b -> res3c_branch2b (in-place) +I0111 18:29:32.525749 12987 net.cpp:150] Setting up bn3c_branch2b +I0111 18:29:32.525765 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.525770 12987 net.cpp:165] Memory required for data: 4746248704 +I0111 18:29:32.525782 12987 layer_factory.hpp:77] Creating layer scale3c_branch2b +I0111 18:29:32.525795 12987 net.cpp:100] Creating Layer scale3c_branch2b +I0111 18:29:32.525804 12987 net.cpp:434] scale3c_branch2b <- res3c_branch2b +I0111 18:29:32.525813 12987 net.cpp:395] scale3c_branch2b -> res3c_branch2b (in-place) +I0111 18:29:32.525894 12987 layer_factory.hpp:77] Creating layer scale3c_branch2b +I0111 18:29:32.526134 12987 net.cpp:150] Setting up scale3c_branch2b +I0111 18:29:32.526151 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.526159 12987 net.cpp:165] Memory required for data: 4759093760 +I0111 18:29:32.526168 12987 layer_factory.hpp:77] Creating layer res3c_branch2b_relu +I0111 18:29:32.526180 12987 net.cpp:100] Creating Layer res3c_branch2b_relu +I0111 18:29:32.526187 12987 net.cpp:434] res3c_branch2b_relu <- res3c_branch2b +I0111 18:29:32.526195 12987 net.cpp:395] res3c_branch2b_relu -> res3c_branch2b (in-place) +I0111 18:29:32.527658 12987 net.cpp:150] Setting up res3c_branch2b_relu +I0111 18:29:32.527683 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.527691 12987 net.cpp:165] Memory required for data: 4771938816 +I0111 18:29:32.527707 12987 layer_factory.hpp:77] Creating layer res3c_branch2c +I0111 18:29:32.527729 12987 net.cpp:100] Creating Layer res3c_branch2c +I0111 18:29:32.527739 12987 net.cpp:434] res3c_branch2c <- res3c_branch2b +I0111 18:29:32.527750 12987 net.cpp:408] res3c_branch2c -> res3c_branch2c +I0111 18:29:32.532733 12987 net.cpp:150] Setting up res3c_branch2c +I0111 18:29:32.532760 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.532768 12987 net.cpp:165] Memory required for data: 4823319040 +I0111 18:29:32.532778 12987 layer_factory.hpp:77] Creating layer bn3c_branch2c +I0111 18:29:32.532791 12987 net.cpp:100] Creating Layer bn3c_branch2c +I0111 18:29:32.532800 12987 net.cpp:434] bn3c_branch2c <- res3c_branch2c +I0111 18:29:32.532814 12987 net.cpp:395] bn3c_branch2c -> res3c_branch2c (in-place) +I0111 18:29:32.533208 12987 net.cpp:150] Setting up bn3c_branch2c +I0111 18:29:32.533222 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.533231 12987 net.cpp:165] Memory required for data: 4874699264 +I0111 18:29:32.533241 12987 layer_factory.hpp:77] Creating layer scale3c_branch2c +I0111 18:29:32.533253 12987 net.cpp:100] Creating Layer scale3c_branch2c +I0111 18:29:32.533262 12987 net.cpp:434] scale3c_branch2c <- res3c_branch2c +I0111 18:29:32.533269 12987 net.cpp:395] scale3c_branch2c -> res3c_branch2c (in-place) +I0111 18:29:32.533350 12987 layer_factory.hpp:77] Creating layer scale3c_branch2c +I0111 18:29:32.533563 12987 net.cpp:150] Setting up scale3c_branch2c +I0111 18:29:32.533577 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.533582 12987 net.cpp:165] Memory required for data: 4926079488 +I0111 18:29:32.533591 12987 layer_factory.hpp:77] Creating 
layer res3c +I0111 18:29:32.533605 12987 net.cpp:100] Creating Layer res3c +I0111 18:29:32.533614 12987 net.cpp:434] res3c <- res3b_res3b_relu_0_split_1 +I0111 18:29:32.533622 12987 net.cpp:434] res3c <- res3c_branch2c +I0111 18:29:32.533629 12987 net.cpp:408] res3c -> res3c +I0111 18:29:32.533679 12987 net.cpp:150] Setting up res3c +I0111 18:29:32.533694 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.533701 12987 net.cpp:165] Memory required for data: 4977459712 +I0111 18:29:32.533716 12987 layer_factory.hpp:77] Creating layer res3c_relu +I0111 18:29:32.533728 12987 net.cpp:100] Creating Layer res3c_relu +I0111 18:29:32.533735 12987 net.cpp:434] res3c_relu <- res3c +I0111 18:29:32.533747 12987 net.cpp:395] res3c_relu -> res3c (in-place) +I0111 18:29:32.534092 12987 net.cpp:150] Setting up res3c_relu +I0111 18:29:32.534121 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.534127 12987 net.cpp:165] Memory required for data: 5028839936 +I0111 18:29:32.534133 12987 layer_factory.hpp:77] Creating layer res3c_res3c_relu_0_split +I0111 18:29:32.534143 12987 net.cpp:100] Creating Layer res3c_res3c_relu_0_split +I0111 18:29:32.534152 12987 net.cpp:434] res3c_res3c_relu_0_split <- res3c +I0111 18:29:32.534168 12987 net.cpp:408] res3c_res3c_relu_0_split -> res3c_res3c_relu_0_split_0 +I0111 18:29:32.534183 12987 net.cpp:408] res3c_res3c_relu_0_split -> res3c_res3c_relu_0_split_1 +I0111 18:29:32.534273 12987 net.cpp:150] Setting up res3c_res3c_relu_0_split +I0111 18:29:32.534287 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.534294 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.534301 12987 net.cpp:165] Memory required for data: 5131600384 +I0111 18:29:32.534307 12987 layer_factory.hpp:77] Creating layer res3d_branch2a +I0111 18:29:32.534322 12987 net.cpp:100] Creating Layer res3d_branch2a +I0111 18:29:32.534330 12987 net.cpp:434] res3d_branch2a <- res3c_res3c_relu_0_split_0 +I0111 18:29:32.534342 12987 net.cpp:408] res3d_branch2a -> res3d_branch2a +I0111 18:29:32.540148 12987 net.cpp:150] Setting up res3d_branch2a +I0111 18:29:32.540174 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.540182 12987 net.cpp:165] Memory required for data: 5144445440 +I0111 18:29:32.540192 12987 layer_factory.hpp:77] Creating layer bn3d_branch2a +I0111 18:29:32.540207 12987 net.cpp:100] Creating Layer bn3d_branch2a +I0111 18:29:32.540215 12987 net.cpp:434] bn3d_branch2a <- res3d_branch2a +I0111 18:29:32.540225 12987 net.cpp:395] bn3d_branch2a -> res3d_branch2a (in-place) +I0111 18:29:32.540602 12987 net.cpp:150] Setting up bn3d_branch2a +I0111 18:29:32.540616 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.540624 12987 net.cpp:165] Memory required for data: 5157290496 +I0111 18:29:32.540668 12987 layer_factory.hpp:77] Creating layer scale3d_branch2a +I0111 18:29:32.540683 12987 net.cpp:100] Creating Layer scale3d_branch2a +I0111 18:29:32.540690 12987 net.cpp:434] scale3d_branch2a <- res3d_branch2a +I0111 18:29:32.540699 12987 net.cpp:395] scale3d_branch2a -> res3d_branch2a (in-place) +I0111 18:29:32.540779 12987 layer_factory.hpp:77] Creating layer scale3d_branch2a +I0111 18:29:32.541000 12987 net.cpp:150] Setting up scale3d_branch2a +I0111 18:29:32.541014 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.541019 12987 net.cpp:165] Memory required for data: 5170135552 +I0111 18:29:32.541028 12987 layer_factory.hpp:77] Creating layer res3d_branch2a_relu +I0111 18:29:32.541038 
12987 net.cpp:100] Creating Layer res3d_branch2a_relu +I0111 18:29:32.541043 12987 net.cpp:434] res3d_branch2a_relu <- res3d_branch2a +I0111 18:29:32.541054 12987 net.cpp:395] res3d_branch2a_relu -> res3d_branch2a (in-place) +I0111 18:29:32.542446 12987 net.cpp:150] Setting up res3d_branch2a_relu +I0111 18:29:32.542472 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.542479 12987 net.cpp:165] Memory required for data: 5182980608 +I0111 18:29:32.542485 12987 layer_factory.hpp:77] Creating layer res3d_branch2b +I0111 18:29:32.542500 12987 net.cpp:100] Creating Layer res3d_branch2b +I0111 18:29:32.542507 12987 net.cpp:434] res3d_branch2b <- res3d_branch2a +I0111 18:29:32.542520 12987 net.cpp:408] res3d_branch2b -> res3d_branch2b +I0111 18:29:32.548557 12987 net.cpp:150] Setting up res3d_branch2b +I0111 18:29:32.548583 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.548590 12987 net.cpp:165] Memory required for data: 5195825664 +I0111 18:29:32.548599 12987 layer_factory.hpp:77] Creating layer bn3d_branch2b +I0111 18:29:32.548611 12987 net.cpp:100] Creating Layer bn3d_branch2b +I0111 18:29:32.548619 12987 net.cpp:434] bn3d_branch2b <- res3d_branch2b +I0111 18:29:32.548626 12987 net.cpp:395] bn3d_branch2b -> res3d_branch2b (in-place) +I0111 18:29:32.548975 12987 net.cpp:150] Setting up bn3d_branch2b +I0111 18:29:32.548987 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.549005 12987 net.cpp:165] Memory required for data: 5208670720 +I0111 18:29:32.549033 12987 layer_factory.hpp:77] Creating layer scale3d_branch2b +I0111 18:29:32.549048 12987 net.cpp:100] Creating Layer scale3d_branch2b +I0111 18:29:32.549057 12987 net.cpp:434] scale3d_branch2b <- res3d_branch2b +I0111 18:29:32.549063 12987 net.cpp:395] scale3d_branch2b -> res3d_branch2b (in-place) +I0111 18:29:32.549140 12987 layer_factory.hpp:77] Creating layer scale3d_branch2b +I0111 18:29:32.549342 12987 net.cpp:150] Setting up scale3d_branch2b +I0111 18:29:32.549355 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.549360 12987 net.cpp:165] Memory required for data: 5221515776 +I0111 18:29:32.549367 12987 layer_factory.hpp:77] Creating layer res3d_branch2b_relu +I0111 18:29:32.549376 12987 net.cpp:100] Creating Layer res3d_branch2b_relu +I0111 18:29:32.549382 12987 net.cpp:434] res3d_branch2b_relu <- res3d_branch2b +I0111 18:29:32.549391 12987 net.cpp:395] res3d_branch2b_relu -> res3d_branch2b (in-place) +I0111 18:29:32.550693 12987 net.cpp:150] Setting up res3d_branch2b_relu +I0111 18:29:32.550717 12987 net.cpp:157] Top shape: 32 128 28 28 (3211264) +I0111 18:29:32.550725 12987 net.cpp:165] Memory required for data: 5234360832 +I0111 18:29:32.550730 12987 layer_factory.hpp:77] Creating layer res3d_branch2c +I0111 18:29:32.550743 12987 net.cpp:100] Creating Layer res3d_branch2c +I0111 18:29:32.550750 12987 net.cpp:434] res3d_branch2c <- res3d_branch2b +I0111 18:29:32.550762 12987 net.cpp:408] res3d_branch2c -> res3d_branch2c +I0111 18:29:32.555153 12987 net.cpp:150] Setting up res3d_branch2c +I0111 18:29:32.555177 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.555186 12987 net.cpp:165] Memory required for data: 5285741056 +I0111 18:29:32.555193 12987 layer_factory.hpp:77] Creating layer bn3d_branch2c +I0111 18:29:32.555205 12987 net.cpp:100] Creating Layer bn3d_branch2c +I0111 18:29:32.555213 12987 net.cpp:434] bn3d_branch2c <- res3d_branch2c +I0111 18:29:32.555232 12987 net.cpp:395] bn3d_branch2c -> res3d_branch2c (in-place) +I0111 
18:29:32.555579 12987 net.cpp:150] Setting up bn3d_branch2c +I0111 18:29:32.555593 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.555598 12987 net.cpp:165] Memory required for data: 5337121280 +I0111 18:29:32.555618 12987 layer_factory.hpp:77] Creating layer scale3d_branch2c +I0111 18:29:32.555630 12987 net.cpp:100] Creating Layer scale3d_branch2c +I0111 18:29:32.555639 12987 net.cpp:434] scale3d_branch2c <- res3d_branch2c +I0111 18:29:32.555646 12987 net.cpp:395] scale3d_branch2c -> res3d_branch2c (in-place) +I0111 18:29:32.555719 12987 layer_factory.hpp:77] Creating layer scale3d_branch2c +I0111 18:29:32.555912 12987 net.cpp:150] Setting up scale3d_branch2c +I0111 18:29:32.555925 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.555929 12987 net.cpp:165] Memory required for data: 5388501504 +I0111 18:29:32.555938 12987 layer_factory.hpp:77] Creating layer res3d +I0111 18:29:32.555948 12987 net.cpp:100] Creating Layer res3d +I0111 18:29:32.555954 12987 net.cpp:434] res3d <- res3c_res3c_relu_0_split_1 +I0111 18:29:32.555960 12987 net.cpp:434] res3d <- res3d_branch2c +I0111 18:29:32.555968 12987 net.cpp:408] res3d -> res3d +I0111 18:29:32.556010 12987 net.cpp:150] Setting up res3d +I0111 18:29:32.556021 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.556026 12987 net.cpp:165] Memory required for data: 5439881728 +I0111 18:29:32.556031 12987 layer_factory.hpp:77] Creating layer res3d_relu +I0111 18:29:32.556037 12987 net.cpp:100] Creating Layer res3d_relu +I0111 18:29:32.556042 12987 net.cpp:434] res3d_relu <- res3d +I0111 18:29:32.556051 12987 net.cpp:395] res3d_relu -> res3d (in-place) +I0111 18:29:32.556332 12987 net.cpp:150] Setting up res3d_relu +I0111 18:29:32.556347 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.556351 12987 net.cpp:165] Memory required for data: 5491261952 +I0111 18:29:32.556356 12987 layer_factory.hpp:77] Creating layer res3d_res3d_relu_0_split +I0111 18:29:32.556365 12987 net.cpp:100] Creating Layer res3d_res3d_relu_0_split +I0111 18:29:32.556380 12987 net.cpp:434] res3d_res3d_relu_0_split <- res3d +I0111 18:29:32.556394 12987 net.cpp:408] res3d_res3d_relu_0_split -> res3d_res3d_relu_0_split_0 +I0111 18:29:32.556407 12987 net.cpp:408] res3d_res3d_relu_0_split -> res3d_res3d_relu_0_split_1 +I0111 18:29:32.556484 12987 net.cpp:150] Setting up res3d_res3d_relu_0_split +I0111 18:29:32.556495 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.556501 12987 net.cpp:157] Top shape: 32 512 28 28 (12845056) +I0111 18:29:32.556505 12987 net.cpp:165] Memory required for data: 5594022400 +I0111 18:29:32.556517 12987 layer_factory.hpp:77] Creating layer res4a_branch1 +I0111 18:29:32.556536 12987 net.cpp:100] Creating Layer res4a_branch1 +I0111 18:29:32.556543 12987 net.cpp:434] res4a_branch1 <- res3d_res3d_relu_0_split_0 +I0111 18:29:32.556551 12987 net.cpp:408] res4a_branch1 -> res4a_branch1 +I0111 18:29:32.569419 12987 net.cpp:150] Setting up res4a_branch1 +I0111 18:29:32.569443 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.569450 12987 net.cpp:165] Memory required for data: 5619712512 +I0111 18:29:32.569458 12987 layer_factory.hpp:77] Creating layer bn4a_branch1 +I0111 18:29:32.569470 12987 net.cpp:100] Creating Layer bn4a_branch1 +I0111 18:29:32.569478 12987 net.cpp:434] bn4a_branch1 <- res4a_branch1 +I0111 18:29:32.569484 12987 net.cpp:395] bn4a_branch1 -> res4a_branch1 (in-place) +I0111 18:29:32.569816 12987 net.cpp:150] Setting up bn4a_branch1 
+I0111 18:29:32.569830 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.569834 12987 net.cpp:165] Memory required for data: 5645402624 +I0111 18:29:32.569844 12987 layer_factory.hpp:77] Creating layer scale4a_branch1 +I0111 18:29:32.569854 12987 net.cpp:100] Creating Layer scale4a_branch1 +I0111 18:29:32.569859 12987 net.cpp:434] scale4a_branch1 <- res4a_branch1 +I0111 18:29:32.569864 12987 net.cpp:395] scale4a_branch1 -> res4a_branch1 (in-place) +I0111 18:29:32.569923 12987 layer_factory.hpp:77] Creating layer scale4a_branch1 +I0111 18:29:32.570111 12987 net.cpp:150] Setting up scale4a_branch1 +I0111 18:29:32.570123 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.570127 12987 net.cpp:165] Memory required for data: 5671092736 +I0111 18:29:32.570135 12987 layer_factory.hpp:77] Creating layer res4a_branch2a +I0111 18:29:32.570159 12987 net.cpp:100] Creating Layer res4a_branch2a +I0111 18:29:32.570168 12987 net.cpp:434] res4a_branch2a <- res3d_res3d_relu_0_split_1 +I0111 18:29:32.570179 12987 net.cpp:408] res4a_branch2a -> res4a_branch2a +I0111 18:29:32.576040 12987 net.cpp:150] Setting up res4a_branch2a +I0111 18:29:32.576062 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.576067 12987 net.cpp:165] Memory required for data: 5677515264 +I0111 18:29:32.576074 12987 layer_factory.hpp:77] Creating layer bn4a_branch2a +I0111 18:29:32.576086 12987 net.cpp:100] Creating Layer bn4a_branch2a +I0111 18:29:32.576092 12987 net.cpp:434] bn4a_branch2a <- res4a_branch2a +I0111 18:29:32.576099 12987 net.cpp:395] bn4a_branch2a -> res4a_branch2a (in-place) +I0111 18:29:32.576405 12987 net.cpp:150] Setting up bn4a_branch2a +I0111 18:29:32.576416 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.576421 12987 net.cpp:165] Memory required for data: 5683937792 +I0111 18:29:32.576429 12987 layer_factory.hpp:77] Creating layer scale4a_branch2a +I0111 18:29:32.576436 12987 net.cpp:100] Creating Layer scale4a_branch2a +I0111 18:29:32.576441 12987 net.cpp:434] scale4a_branch2a <- res4a_branch2a +I0111 18:29:32.576448 12987 net.cpp:395] scale4a_branch2a -> res4a_branch2a (in-place) +I0111 18:29:32.576508 12987 layer_factory.hpp:77] Creating layer scale4a_branch2a +I0111 18:29:32.576683 12987 net.cpp:150] Setting up scale4a_branch2a +I0111 18:29:32.576695 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.576700 12987 net.cpp:165] Memory required for data: 5690360320 +I0111 18:29:32.576707 12987 layer_factory.hpp:77] Creating layer res4a_branch2a_relu +I0111 18:29:32.576714 12987 net.cpp:100] Creating Layer res4a_branch2a_relu +I0111 18:29:32.576727 12987 net.cpp:434] res4a_branch2a_relu <- res4a_branch2a +I0111 18:29:32.576733 12987 net.cpp:395] res4a_branch2a_relu -> res4a_branch2a (in-place) +I0111 18:29:32.577008 12987 net.cpp:150] Setting up res4a_branch2a_relu +I0111 18:29:32.577023 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.577026 12987 net.cpp:165] Memory required for data: 5696782848 +I0111 18:29:32.577030 12987 layer_factory.hpp:77] Creating layer res4a_branch2b +I0111 18:29:32.577059 12987 net.cpp:100] Creating Layer res4a_branch2b +I0111 18:29:32.577066 12987 net.cpp:434] res4a_branch2b <- res4a_branch2a +I0111 18:29:32.577076 12987 net.cpp:408] res4a_branch2b -> res4a_branch2b +I0111 18:29:32.590011 12987 net.cpp:150] Setting up res4a_branch2b +I0111 18:29:32.590039 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.590044 12987 net.cpp:165] Memory required for 
data: 5703205376 +I0111 18:29:32.590052 12987 layer_factory.hpp:77] Creating layer bn4a_branch2b +I0111 18:29:32.590064 12987 net.cpp:100] Creating Layer bn4a_branch2b +I0111 18:29:32.590068 12987 net.cpp:434] bn4a_branch2b <- res4a_branch2b +I0111 18:29:32.590085 12987 net.cpp:395] bn4a_branch2b -> res4a_branch2b (in-place) +I0111 18:29:32.590399 12987 net.cpp:150] Setting up bn4a_branch2b +I0111 18:29:32.590410 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.590415 12987 net.cpp:165] Memory required for data: 5709627904 +I0111 18:29:32.590423 12987 layer_factory.hpp:77] Creating layer scale4a_branch2b +I0111 18:29:32.590431 12987 net.cpp:100] Creating Layer scale4a_branch2b +I0111 18:29:32.590438 12987 net.cpp:434] scale4a_branch2b <- res4a_branch2b +I0111 18:29:32.590446 12987 net.cpp:395] scale4a_branch2b -> res4a_branch2b (in-place) +I0111 18:29:32.590509 12987 layer_factory.hpp:77] Creating layer scale4a_branch2b +I0111 18:29:32.590685 12987 net.cpp:150] Setting up scale4a_branch2b +I0111 18:29:32.590698 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.590703 12987 net.cpp:165] Memory required for data: 5716050432 +I0111 18:29:32.590709 12987 layer_factory.hpp:77] Creating layer res4a_branch2b_relu +I0111 18:29:32.590716 12987 net.cpp:100] Creating Layer res4a_branch2b_relu +I0111 18:29:32.590720 12987 net.cpp:434] res4a_branch2b_relu <- res4a_branch2b +I0111 18:29:32.590725 12987 net.cpp:395] res4a_branch2b_relu -> res4a_branch2b (in-place) +I0111 18:29:32.591930 12987 net.cpp:150] Setting up res4a_branch2b_relu +I0111 18:29:32.591949 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.591954 12987 net.cpp:165] Memory required for data: 5722472960 +I0111 18:29:32.591958 12987 layer_factory.hpp:77] Creating layer res4a_branch2c +I0111 18:29:32.591986 12987 net.cpp:100] Creating Layer res4a_branch2c +I0111 18:29:32.591996 12987 net.cpp:434] res4a_branch2c <- res4a_branch2b +I0111 18:29:32.592005 12987 net.cpp:408] res4a_branch2c -> res4a_branch2c +I0111 18:29:32.599773 12987 net.cpp:150] Setting up res4a_branch2c +I0111 18:29:32.599794 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.599799 12987 net.cpp:165] Memory required for data: 5748163072 +I0111 18:29:32.599807 12987 layer_factory.hpp:77] Creating layer bn4a_branch2c +I0111 18:29:32.599820 12987 net.cpp:100] Creating Layer bn4a_branch2c +I0111 18:29:32.599828 12987 net.cpp:434] bn4a_branch2c <- res4a_branch2c +I0111 18:29:32.599836 12987 net.cpp:395] bn4a_branch2c -> res4a_branch2c (in-place) +I0111 18:29:32.600136 12987 net.cpp:150] Setting up bn4a_branch2c +I0111 18:29:32.600147 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600150 12987 net.cpp:165] Memory required for data: 5773853184 +I0111 18:29:32.600159 12987 layer_factory.hpp:77] Creating layer scale4a_branch2c +I0111 18:29:32.600167 12987 net.cpp:100] Creating Layer scale4a_branch2c +I0111 18:29:32.600172 12987 net.cpp:434] scale4a_branch2c <- res4a_branch2c +I0111 18:29:32.600183 12987 net.cpp:395] scale4a_branch2c -> res4a_branch2c (in-place) +I0111 18:29:32.600239 12987 layer_factory.hpp:77] Creating layer scale4a_branch2c +I0111 18:29:32.600424 12987 net.cpp:150] Setting up scale4a_branch2c +I0111 18:29:32.600445 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600450 12987 net.cpp:165] Memory required for data: 5799543296 +I0111 18:29:32.600456 12987 layer_factory.hpp:77] Creating layer res4a +I0111 18:29:32.600463 12987 net.cpp:100] Creating 
Layer res4a +I0111 18:29:32.600467 12987 net.cpp:434] res4a <- res4a_branch1 +I0111 18:29:32.600472 12987 net.cpp:434] res4a <- res4a_branch2c +I0111 18:29:32.600477 12987 net.cpp:408] res4a -> res4a +I0111 18:29:32.600517 12987 net.cpp:150] Setting up res4a +I0111 18:29:32.600528 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600530 12987 net.cpp:165] Memory required for data: 5825233408 +I0111 18:29:32.600534 12987 layer_factory.hpp:77] Creating layer res4a_relu +I0111 18:29:32.600550 12987 net.cpp:100] Creating Layer res4a_relu +I0111 18:29:32.600558 12987 net.cpp:434] res4a_relu <- res4a +I0111 18:29:32.600564 12987 net.cpp:395] res4a_relu -> res4a (in-place) +I0111 18:29:32.600819 12987 net.cpp:150] Setting up res4a_relu +I0111 18:29:32.600836 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600839 12987 net.cpp:165] Memory required for data: 5850923520 +I0111 18:29:32.600843 12987 layer_factory.hpp:77] Creating layer res4a_res4a_relu_0_split +I0111 18:29:32.600879 12987 net.cpp:100] Creating Layer res4a_res4a_relu_0_split +I0111 18:29:32.600886 12987 net.cpp:434] res4a_res4a_relu_0_split <- res4a +I0111 18:29:32.600893 12987 net.cpp:408] res4a_res4a_relu_0_split -> res4a_res4a_relu_0_split_0 +I0111 18:29:32.600903 12987 net.cpp:408] res4a_res4a_relu_0_split -> res4a_res4a_relu_0_split_1 +I0111 18:29:32.600967 12987 net.cpp:150] Setting up res4a_res4a_relu_0_split +I0111 18:29:32.600978 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600983 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.600986 12987 net.cpp:165] Memory required for data: 5902303744 +I0111 18:29:32.600991 12987 layer_factory.hpp:77] Creating layer res4b_branch2a +I0111 18:29:32.601003 12987 net.cpp:100] Creating Layer res4b_branch2a +I0111 18:29:32.601011 12987 net.cpp:434] res4b_branch2a <- res4a_res4a_relu_0_split_0 +I0111 18:29:32.601017 12987 net.cpp:408] res4b_branch2a -> res4b_branch2a +I0111 18:29:32.608491 12987 net.cpp:150] Setting up res4b_branch2a +I0111 18:29:32.608515 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.608522 12987 net.cpp:165] Memory required for data: 5908726272 +I0111 18:29:32.608530 12987 layer_factory.hpp:77] Creating layer bn4b_branch2a +I0111 18:29:32.608537 12987 net.cpp:100] Creating Layer bn4b_branch2a +I0111 18:29:32.608541 12987 net.cpp:434] bn4b_branch2a <- res4b_branch2a +I0111 18:29:32.608551 12987 net.cpp:395] bn4b_branch2a -> res4b_branch2a (in-place) +I0111 18:29:32.608847 12987 net.cpp:150] Setting up bn4b_branch2a +I0111 18:29:32.608858 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.608862 12987 net.cpp:165] Memory required for data: 5915148800 +I0111 18:29:32.608878 12987 layer_factory.hpp:77] Creating layer scale4b_branch2a +I0111 18:29:32.608888 12987 net.cpp:100] Creating Layer scale4b_branch2a +I0111 18:29:32.608892 12987 net.cpp:434] scale4b_branch2a <- res4b_branch2a +I0111 18:29:32.608898 12987 net.cpp:395] scale4b_branch2a -> res4b_branch2a (in-place) +I0111 18:29:32.608963 12987 layer_factory.hpp:77] Creating layer scale4b_branch2a +I0111 18:29:32.609127 12987 net.cpp:150] Setting up scale4b_branch2a +I0111 18:29:32.609138 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.609141 12987 net.cpp:165] Memory required for data: 5921571328 +I0111 18:29:32.609148 12987 layer_factory.hpp:77] Creating layer res4b_branch2a_relu +I0111 18:29:32.609153 12987 net.cpp:100] Creating Layer res4b_branch2a_relu +I0111 18:29:32.609158 
12987 net.cpp:434] res4b_branch2a_relu <- res4b_branch2a +I0111 18:29:32.609164 12987 net.cpp:395] res4b_branch2a_relu -> res4b_branch2a (in-place) +I0111 18:29:32.609413 12987 net.cpp:150] Setting up res4b_branch2a_relu +I0111 18:29:32.609428 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.609441 12987 net.cpp:165] Memory required for data: 5927993856 +I0111 18:29:32.609446 12987 layer_factory.hpp:77] Creating layer res4b_branch2b +I0111 18:29:32.609457 12987 net.cpp:100] Creating Layer res4b_branch2b +I0111 18:29:32.609464 12987 net.cpp:434] res4b_branch2b <- res4b_branch2a +I0111 18:29:32.609472 12987 net.cpp:408] res4b_branch2b -> res4b_branch2b +I0111 18:29:32.621441 12987 net.cpp:150] Setting up res4b_branch2b +I0111 18:29:32.621462 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.621466 12987 net.cpp:165] Memory required for data: 5934416384 +I0111 18:29:32.621474 12987 layer_factory.hpp:77] Creating layer bn4b_branch2b +I0111 18:29:32.621481 12987 net.cpp:100] Creating Layer bn4b_branch2b +I0111 18:29:32.621485 12987 net.cpp:434] bn4b_branch2b <- res4b_branch2b +I0111 18:29:32.621491 12987 net.cpp:395] bn4b_branch2b -> res4b_branch2b (in-place) +I0111 18:29:32.621767 12987 net.cpp:150] Setting up bn4b_branch2b +I0111 18:29:32.621778 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.621783 12987 net.cpp:165] Memory required for data: 5940838912 +I0111 18:29:32.621798 12987 layer_factory.hpp:77] Creating layer scale4b_branch2b +I0111 18:29:32.621809 12987 net.cpp:100] Creating Layer scale4b_branch2b +I0111 18:29:32.621814 12987 net.cpp:434] scale4b_branch2b <- res4b_branch2b +I0111 18:29:32.621824 12987 net.cpp:395] scale4b_branch2b -> res4b_branch2b (in-place) +I0111 18:29:32.621886 12987 layer_factory.hpp:77] Creating layer scale4b_branch2b +I0111 18:29:32.622045 12987 net.cpp:150] Setting up scale4b_branch2b +I0111 18:29:32.622056 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.622059 12987 net.cpp:165] Memory required for data: 5947261440 +I0111 18:29:32.622066 12987 layer_factory.hpp:77] Creating layer res4b_branch2b_relu +I0111 18:29:32.622071 12987 net.cpp:100] Creating Layer res4b_branch2b_relu +I0111 18:29:32.622076 12987 net.cpp:434] res4b_branch2b_relu <- res4b_branch2b +I0111 18:29:32.622082 12987 net.cpp:395] res4b_branch2b_relu -> res4b_branch2b (in-place) +I0111 18:29:32.623178 12987 net.cpp:150] Setting up res4b_branch2b_relu +I0111 18:29:32.623195 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.623199 12987 net.cpp:165] Memory required for data: 5953683968 +I0111 18:29:32.623203 12987 layer_factory.hpp:77] Creating layer res4b_branch2c +I0111 18:29:32.623216 12987 net.cpp:100] Creating Layer res4b_branch2c +I0111 18:29:32.623222 12987 net.cpp:434] res4b_branch2c <- res4b_branch2b +I0111 18:29:32.623231 12987 net.cpp:408] res4b_branch2c -> res4b_branch2c +I0111 18:29:32.630095 12987 net.cpp:150] Setting up res4b_branch2c +I0111 18:29:32.630116 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.630120 12987 net.cpp:165] Memory required for data: 5979374080 +I0111 18:29:32.630126 12987 layer_factory.hpp:77] Creating layer bn4b_branch2c +I0111 18:29:32.630134 12987 net.cpp:100] Creating Layer bn4b_branch2c +I0111 18:29:32.630138 12987 net.cpp:434] bn4b_branch2c <- res4b_branch2c +I0111 18:29:32.630143 12987 net.cpp:395] bn4b_branch2c -> res4b_branch2c (in-place) +I0111 18:29:32.630429 12987 net.cpp:150] Setting up bn4b_branch2c +I0111 18:29:32.630441 
12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.630445 12987 net.cpp:165] Memory required for data: 6005064192 +I0111 18:29:32.630452 12987 layer_factory.hpp:77] Creating layer scale4b_branch2c +I0111 18:29:32.630460 12987 net.cpp:100] Creating Layer scale4b_branch2c +I0111 18:29:32.630463 12987 net.cpp:434] scale4b_branch2c <- res4b_branch2c +I0111 18:29:32.630468 12987 net.cpp:395] scale4b_branch2c -> res4b_branch2c (in-place) +I0111 18:29:32.630525 12987 layer_factory.hpp:77] Creating layer scale4b_branch2c +I0111 18:29:32.630691 12987 net.cpp:150] Setting up scale4b_branch2c +I0111 18:29:32.630702 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.630704 12987 net.cpp:165] Memory required for data: 6030754304 +I0111 18:29:32.630710 12987 layer_factory.hpp:77] Creating layer res4b +I0111 18:29:32.630718 12987 net.cpp:100] Creating Layer res4b +I0111 18:29:32.630731 12987 net.cpp:434] res4b <- res4a_res4a_relu_0_split_1 +I0111 18:29:32.630736 12987 net.cpp:434] res4b <- res4b_branch2c +I0111 18:29:32.630743 12987 net.cpp:408] res4b -> res4b +I0111 18:29:32.630779 12987 net.cpp:150] Setting up res4b +I0111 18:29:32.630789 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.630791 12987 net.cpp:165] Memory required for data: 6056444416 +I0111 18:29:32.630803 12987 layer_factory.hpp:77] Creating layer res4b_relu +I0111 18:29:32.630811 12987 net.cpp:100] Creating Layer res4b_relu +I0111 18:29:32.630815 12987 net.cpp:434] res4b_relu <- res4b +I0111 18:29:32.630820 12987 net.cpp:395] res4b_relu -> res4b (in-place) +I0111 18:29:32.631048 12987 net.cpp:150] Setting up res4b_relu +I0111 18:29:32.631062 12987 net.cpp:157] Top shape: 32 1024 14 14 (6422528) +I0111 18:29:32.631065 12987 net.cpp:165] Memory required for data: 6082134528 +I0111 18:29:32.631070 12987 layer_factory.hpp:77] Creating layer res4c_branch2a +I0111 18:29:32.631083 12987 net.cpp:100] Creating Layer res4c_branch2a +I0111 18:29:32.631089 12987 net.cpp:434] res4c_branch2a <- res4b +I0111 18:29:32.631096 12987 net.cpp:408] res4c_branch2a -> res4c_branch2a +I0111 18:29:32.637892 12987 net.cpp:150] Setting up res4c_branch2a +I0111 18:29:32.637912 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.637920 12987 net.cpp:165] Memory required for data: 6088557056 +I0111 18:29:32.637926 12987 layer_factory.hpp:77] Creating layer bn4c_branch2a +I0111 18:29:32.637936 12987 net.cpp:100] Creating Layer bn4c_branch2a +I0111 18:29:32.637943 12987 net.cpp:434] bn4c_branch2a <- res4c_branch2a +I0111 18:29:32.637951 12987 net.cpp:395] bn4c_branch2a -> res4c_branch2a (in-place) +I0111 18:29:32.638231 12987 net.cpp:150] Setting up bn4c_branch2a +I0111 18:29:32.638240 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.638243 12987 net.cpp:165] Memory required for data: 6094979584 +I0111 18:29:32.638252 12987 layer_factory.hpp:77] Creating layer scale4c_branch2a +I0111 18:29:32.638259 12987 net.cpp:100] Creating Layer scale4c_branch2a +I0111 18:29:32.638263 12987 net.cpp:434] scale4c_branch2a <- res4c_branch2a +I0111 18:29:32.638272 12987 net.cpp:395] scale4c_branch2a -> res4c_branch2a (in-place) +I0111 18:29:32.638329 12987 layer_factory.hpp:77] Creating layer scale4c_branch2a +I0111 18:29:32.638481 12987 net.cpp:150] Setting up scale4c_branch2a +I0111 18:29:32.638494 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.638496 12987 net.cpp:165] Memory required for data: 6101402112 +I0111 18:29:32.638502 12987 layer_factory.hpp:77] Creating 
layer res4c_branch2a_relu +I0111 18:29:32.638509 12987 net.cpp:100] Creating Layer res4c_branch2a_relu +I0111 18:29:32.638511 12987 net.cpp:434] res4c_branch2a_relu <- res4c_branch2a +I0111 18:29:32.638516 12987 net.cpp:395] res4c_branch2a_relu -> res4c_branch2a (in-place) +I0111 18:29:32.638746 12987 net.cpp:150] Setting up res4c_branch2a_relu +I0111 18:29:32.638761 12987 net.cpp:157] Top shape: 32 256 14 14 (1605632) +I0111 18:29:32.638763 12987 net.cpp:165] Memory required for data: 6107824640 +I0111 18:29:32.638767 12987 layer_factory.hpp:77] Creating layer pool5 +I0111 18:29:32.638775 12987 net.cpp:100] Creating Layer pool5 +I0111 18:29:32.638779 12987 net.cpp:434] pool5 <- res4c_branch2a +I0111 18:29:32.638788 12987 net.cpp:408] pool5 -> pool5 +I0111 18:29:32.639906 12987 net.cpp:150] Setting up pool5 +I0111 18:29:32.639925 12987 net.cpp:157] Top shape: 32 256 8 8 (524288) +I0111 18:29:32.639928 12987 net.cpp:165] Memory required for data: 6109921792 +I0111 18:29:32.639932 12987 layer_factory.hpp:77] Creating layer fc1000 +I0111 18:29:32.639945 12987 net.cpp:100] Creating Layer fc1000 +I0111 18:29:32.639951 12987 net.cpp:434] fc1000 <- pool5 +I0111 18:29:32.639961 12987 net.cpp:408] fc1000 -> fc1000 +I0111 18:29:32.873741 12987 net.cpp:150] Setting up fc1000 +I0111 18:29:32.873767 12987 net.cpp:157] Top shape: 32 1000 (32000) +I0111 18:29:32.873771 12987 net.cpp:165] Memory required for data: 6110049792 +I0111 18:29:32.873780 12987 layer_factory.hpp:77] Creating layer fc1000_fc1000_0_split +I0111 18:29:32.873805 12987 net.cpp:100] Creating Layer fc1000_fc1000_0_split +I0111 18:29:32.873814 12987 net.cpp:434] fc1000_fc1000_0_split <- fc1000 +I0111 18:29:32.873823 12987 net.cpp:408] fc1000_fc1000_0_split -> fc1000_fc1000_0_split_0 +I0111 18:29:32.873832 12987 net.cpp:408] fc1000_fc1000_0_split -> fc1000_fc1000_0_split_1 +I0111 18:29:32.873842 12987 net.cpp:408] fc1000_fc1000_0_split -> fc1000_fc1000_0_split_2 +I0111 18:29:32.873924 12987 net.cpp:150] Setting up fc1000_fc1000_0_split +I0111 18:29:32.873932 12987 net.cpp:157] Top shape: 32 1000 (32000) +I0111 18:29:32.873936 12987 net.cpp:157] Top shape: 32 1000 (32000) +I0111 18:29:32.873940 12987 net.cpp:157] Top shape: 32 1000 (32000) +I0111 18:29:32.873944 12987 net.cpp:165] Memory required for data: 6110433792 +I0111 18:29:32.873946 12987 layer_factory.hpp:77] Creating layer prob +I0111 18:29:32.873960 12987 net.cpp:100] Creating Layer prob +I0111 18:29:32.873968 12987 net.cpp:434] prob <- fc1000_fc1000_0_split_0 +I0111 18:29:32.873973 12987 net.cpp:434] prob <- label_resnet_32_1_split_0 +I0111 18:29:32.873980 12987 net.cpp:408] prob -> prob +I0111 18:29:32.873996 12987 layer_factory.hpp:77] Creating layer prob +I0111 18:29:32.875293 12987 net.cpp:150] Setting up prob +I0111 18:29:32.875311 12987 net.cpp:157] Top shape: (1) +I0111 18:29:32.875315 12987 net.cpp:160] with loss weight 1 +I0111 18:29:32.875349 12987 net.cpp:165] Memory required for data: 6110433796 +I0111 18:29:32.875352 12987 layer_factory.hpp:77] Creating layer accuracy/top1 +I0111 18:29:32.875365 12987 net.cpp:100] Creating Layer accuracy/top1 +I0111 18:29:32.875372 12987 net.cpp:434] accuracy/top1 <- fc1000_fc1000_0_split_1 +I0111 18:29:32.875380 12987 net.cpp:434] accuracy/top1 <- label_resnet_32_1_split_1 +I0111 18:29:32.875386 12987 net.cpp:408] accuracy/top1 -> accuracy@1 +I0111 18:29:32.875401 12987 net.cpp:150] Setting up accuracy/top1 +I0111 18:29:32.875408 12987 net.cpp:157] Top shape: (1) +I0111 18:29:32.875411 12987 net.cpp:165] Memory required for data: 
6110433800 +I0111 18:29:32.875414 12987 layer_factory.hpp:77] Creating layer accuracy/top5 +I0111 18:29:32.875422 12987 net.cpp:100] Creating Layer accuracy/top5 +I0111 18:29:32.875425 12987 net.cpp:434] accuracy/top5 <- fc1000_fc1000_0_split_2 +I0111 18:29:32.875430 12987 net.cpp:434] accuracy/top5 <- label_resnet_32_1_split_2 +I0111 18:29:32.875435 12987 net.cpp:408] accuracy/top5 -> accuracy@5 +I0111 18:29:32.875442 12987 net.cpp:150] Setting up accuracy/top5 +I0111 18:29:32.875447 12987 net.cpp:157] Top shape: (1) +I0111 18:29:32.875449 12987 net.cpp:165] Memory required for data: 6110433804 +I0111 18:29:32.875453 12987 net.cpp:228] accuracy/top5 does not need backward computation. +I0111 18:29:32.875463 12987 net.cpp:228] accuracy/top1 does not need backward computation. +I0111 18:29:32.875466 12987 net.cpp:226] prob needs backward computation. +I0111 18:29:32.875471 12987 net.cpp:226] fc1000_fc1000_0_split needs backward computation. +I0111 18:29:32.875474 12987 net.cpp:226] fc1000 needs backward computation. +I0111 18:29:32.875478 12987 net.cpp:226] pool5 needs backward computation. +I0111 18:29:32.875481 12987 net.cpp:226] res4c_branch2a_relu needs backward computation. +I0111 18:29:32.875485 12987 net.cpp:226] scale4c_branch2a needs backward computation. +I0111 18:29:32.875489 12987 net.cpp:226] bn4c_branch2a needs backward computation. +I0111 18:29:32.875493 12987 net.cpp:226] res4c_branch2a needs backward computation. +I0111 18:29:32.875495 12987 net.cpp:226] res4b_relu needs backward computation. +I0111 18:29:32.875500 12987 net.cpp:226] res4b needs backward computation. +I0111 18:29:32.875505 12987 net.cpp:226] scale4b_branch2c needs backward computation. +I0111 18:29:32.875509 12987 net.cpp:226] bn4b_branch2c needs backward computation. +I0111 18:29:32.875512 12987 net.cpp:226] res4b_branch2c needs backward computation. +I0111 18:29:32.875516 12987 net.cpp:226] res4b_branch2b_relu needs backward computation. +I0111 18:29:32.875520 12987 net.cpp:226] scale4b_branch2b needs backward computation. +I0111 18:29:32.875524 12987 net.cpp:226] bn4b_branch2b needs backward computation. +I0111 18:29:32.875533 12987 net.cpp:226] res4b_branch2b needs backward computation. +I0111 18:29:32.875537 12987 net.cpp:226] res4b_branch2a_relu needs backward computation. +I0111 18:29:32.875540 12987 net.cpp:226] scale4b_branch2a needs backward computation. +I0111 18:29:32.875545 12987 net.cpp:226] bn4b_branch2a needs backward computation. +I0111 18:29:32.875547 12987 net.cpp:226] res4b_branch2a needs backward computation. +I0111 18:29:32.875552 12987 net.cpp:226] res4a_res4a_relu_0_split needs backward computation. +I0111 18:29:32.875556 12987 net.cpp:226] res4a_relu needs backward computation. +I0111 18:29:32.875560 12987 net.cpp:226] res4a needs backward computation. +I0111 18:29:32.875568 12987 net.cpp:226] scale4a_branch2c needs backward computation. +I0111 18:29:32.875572 12987 net.cpp:226] bn4a_branch2c needs backward computation. +I0111 18:29:32.875576 12987 net.cpp:226] res4a_branch2c needs backward computation. +I0111 18:29:32.875579 12987 net.cpp:226] res4a_branch2b_relu needs backward computation. +I0111 18:29:32.875583 12987 net.cpp:226] scale4a_branch2b needs backward computation. +I0111 18:29:32.875586 12987 net.cpp:226] bn4a_branch2b needs backward computation. +I0111 18:29:32.875589 12987 net.cpp:226] res4a_branch2b needs backward computation. +I0111 18:29:32.875593 12987 net.cpp:226] res4a_branch2a_relu needs backward computation. 
+I0111 18:29:32.875597 12987 net.cpp:226] scale4a_branch2a needs backward computation. +I0111 18:29:32.875600 12987 net.cpp:226] bn4a_branch2a needs backward computation. +I0111 18:29:32.875603 12987 net.cpp:226] res4a_branch2a needs backward computation. +I0111 18:29:32.875608 12987 net.cpp:226] scale4a_branch1 needs backward computation. +I0111 18:29:32.875612 12987 net.cpp:226] bn4a_branch1 needs backward computation. +I0111 18:29:32.875615 12987 net.cpp:226] res4a_branch1 needs backward computation. +I0111 18:29:32.875619 12987 net.cpp:226] res3d_res3d_relu_0_split needs backward computation. +I0111 18:29:32.875624 12987 net.cpp:226] res3d_relu needs backward computation. +I0111 18:29:32.875628 12987 net.cpp:226] res3d needs backward computation. +I0111 18:29:32.875633 12987 net.cpp:226] scale3d_branch2c needs backward computation. +I0111 18:29:32.875638 12987 net.cpp:226] bn3d_branch2c needs backward computation. +I0111 18:29:32.875641 12987 net.cpp:226] res3d_branch2c needs backward computation. +I0111 18:29:32.875644 12987 net.cpp:226] res3d_branch2b_relu needs backward computation. +I0111 18:29:32.875648 12987 net.cpp:226] scale3d_branch2b needs backward computation. +I0111 18:29:32.875651 12987 net.cpp:226] bn3d_branch2b needs backward computation. +I0111 18:29:32.875655 12987 net.cpp:226] res3d_branch2b needs backward computation. +I0111 18:29:32.875658 12987 net.cpp:226] res3d_branch2a_relu needs backward computation. +I0111 18:29:32.875663 12987 net.cpp:226] scale3d_branch2a needs backward computation. +I0111 18:29:32.875666 12987 net.cpp:226] bn3d_branch2a needs backward computation. +I0111 18:29:32.875670 12987 net.cpp:226] res3d_branch2a needs backward computation. +I0111 18:29:32.875674 12987 net.cpp:226] res3c_res3c_relu_0_split needs backward computation. +I0111 18:29:32.875677 12987 net.cpp:226] res3c_relu needs backward computation. +I0111 18:29:32.875681 12987 net.cpp:226] res3c needs backward computation. +I0111 18:29:32.875687 12987 net.cpp:226] scale3c_branch2c needs backward computation. +I0111 18:29:32.875690 12987 net.cpp:226] bn3c_branch2c needs backward computation. +I0111 18:29:32.875694 12987 net.cpp:226] res3c_branch2c needs backward computation. +I0111 18:29:32.875699 12987 net.cpp:226] res3c_branch2b_relu needs backward computation. +I0111 18:29:32.875702 12987 net.cpp:226] scale3c_branch2b needs backward computation. +I0111 18:29:32.875705 12987 net.cpp:226] bn3c_branch2b needs backward computation. +I0111 18:29:32.875710 12987 net.cpp:226] res3c_branch2b needs backward computation. +I0111 18:29:32.875712 12987 net.cpp:226] res3c_branch2a_relu needs backward computation. +I0111 18:29:32.875716 12987 net.cpp:226] scale3c_branch2a needs backward computation. +I0111 18:29:32.875723 12987 net.cpp:226] bn3c_branch2a needs backward computation. +I0111 18:29:32.875727 12987 net.cpp:226] res3c_branch2a needs backward computation. +I0111 18:29:32.875730 12987 net.cpp:226] res3b_res3b_relu_0_split needs backward computation. +I0111 18:29:32.875735 12987 net.cpp:226] res3b_relu needs backward computation. +I0111 18:29:32.875741 12987 net.cpp:226] res3b needs backward computation. +I0111 18:29:32.875744 12987 net.cpp:226] scale3b_branch2c needs backward computation. +I0111 18:29:32.875748 12987 net.cpp:226] bn3b_branch2c needs backward computation. +I0111 18:29:32.875752 12987 net.cpp:226] res3b_branch2c needs backward computation. +I0111 18:29:32.875756 12987 net.cpp:226] res3b_branch2b_relu needs backward computation. 
+I0111 18:29:32.875759 12987 net.cpp:226] scale3b_branch2b needs backward computation. +I0111 18:29:32.875762 12987 net.cpp:226] bn3b_branch2b needs backward computation. +I0111 18:29:32.875766 12987 net.cpp:226] res3b_branch2b needs backward computation. +I0111 18:29:32.875768 12987 net.cpp:226] res3b_branch2a_relu needs backward computation. +I0111 18:29:32.875771 12987 net.cpp:226] scale3b_branch2a needs backward computation. +I0111 18:29:32.875775 12987 net.cpp:226] bn3b_branch2a needs backward computation. +I0111 18:29:32.875778 12987 net.cpp:226] res3b_branch2a needs backward computation. +I0111 18:29:32.875782 12987 net.cpp:226] res3a_res3a_relu_0_split needs backward computation. +I0111 18:29:32.875785 12987 net.cpp:226] res3a_relu needs backward computation. +I0111 18:29:32.875789 12987 net.cpp:226] res3a needs backward computation. +I0111 18:29:32.875793 12987 net.cpp:226] scale3a_branch2c needs backward computation. +I0111 18:29:32.875797 12987 net.cpp:226] bn3a_branch2c needs backward computation. +I0111 18:29:32.875800 12987 net.cpp:226] res3a_branch2c needs backward computation. +I0111 18:29:32.875803 12987 net.cpp:226] res3a_branch2b_relu needs backward computation. +I0111 18:29:32.875808 12987 net.cpp:226] scale3a_branch2b needs backward computation. +I0111 18:29:32.875810 12987 net.cpp:226] bn3a_branch2b needs backward computation. +I0111 18:29:32.875813 12987 net.cpp:226] res3a_branch2b needs backward computation. +I0111 18:29:32.875816 12987 net.cpp:226] res3a_branch2a_relu needs backward computation. +I0111 18:29:32.875820 12987 net.cpp:226] scale3a_branch2a needs backward computation. +I0111 18:29:32.875823 12987 net.cpp:226] bn3a_branch2a needs backward computation. +I0111 18:29:32.875828 12987 net.cpp:226] res3a_branch2a needs backward computation. +I0111 18:29:32.875831 12987 net.cpp:226] scale3a_branch1 needs backward computation. +I0111 18:29:32.875834 12987 net.cpp:226] bn3a_branch1 needs backward computation. +I0111 18:29:32.875838 12987 net.cpp:226] res3a_branch1 needs backward computation. +I0111 18:29:32.875843 12987 net.cpp:226] res2c_res2c_relu_0_split needs backward computation. +I0111 18:29:32.875845 12987 net.cpp:226] res2c_relu needs backward computation. +I0111 18:29:32.875849 12987 net.cpp:226] res2c needs backward computation. +I0111 18:29:32.875854 12987 net.cpp:226] scale2c_branch2c needs backward computation. +I0111 18:29:32.875859 12987 net.cpp:226] bn2c_branch2c needs backward computation. +I0111 18:29:32.875861 12987 net.cpp:226] res2c_branch2c needs backward computation. +I0111 18:29:32.875865 12987 net.cpp:226] res2c_branch2b_relu needs backward computation. +I0111 18:29:32.875869 12987 net.cpp:226] scale2c_branch2b needs backward computation. +I0111 18:29:32.875871 12987 net.cpp:226] bn2c_branch2b needs backward computation. +I0111 18:29:32.875875 12987 net.cpp:226] res2c_branch2b needs backward computation. +I0111 18:29:32.875880 12987 net.cpp:226] res2c_branch2a_relu needs backward computation. +I0111 18:29:32.875882 12987 net.cpp:226] scale2c_branch2a needs backward computation. +I0111 18:29:32.875886 12987 net.cpp:226] bn2c_branch2a needs backward computation. +I0111 18:29:32.875890 12987 net.cpp:226] res2c_branch2a needs backward computation. +I0111 18:29:32.875893 12987 net.cpp:226] res2b_res2b_relu_0_split needs backward computation. +I0111 18:29:32.875906 12987 net.cpp:226] res2b_relu needs backward computation. +I0111 18:29:32.875910 12987 net.cpp:226] res2b needs backward computation. 
+I0111 18:29:32.875915 12987 net.cpp:226] scale2b_branch2c needs backward computation. +I0111 18:29:32.875918 12987 net.cpp:226] bn2b_branch2c needs backward computation. +I0111 18:29:32.875921 12987 net.cpp:226] res2b_branch2c needs backward computation. +I0111 18:29:32.875926 12987 net.cpp:226] res2b_branch2b_relu needs backward computation. +I0111 18:29:32.875928 12987 net.cpp:226] scale2b_branch2b needs backward computation. +I0111 18:29:32.875932 12987 net.cpp:226] bn2b_branch2b needs backward computation. +I0111 18:29:32.875936 12987 net.cpp:226] res2b_branch2b needs backward computation. +I0111 18:29:32.875938 12987 net.cpp:226] res2b_branch2a_relu needs backward computation. +I0111 18:29:32.875942 12987 net.cpp:226] scale2b_branch2a needs backward computation. +I0111 18:29:32.875946 12987 net.cpp:226] bn2b_branch2a needs backward computation. +I0111 18:29:32.875949 12987 net.cpp:226] res2b_branch2a needs backward computation. +I0111 18:29:32.875954 12987 net.cpp:226] res2a_res2a_relu_0_split needs backward computation. +I0111 18:29:32.875958 12987 net.cpp:226] res2a_relu needs backward computation. +I0111 18:29:32.875962 12987 net.cpp:226] res2a needs backward computation. +I0111 18:29:32.875965 12987 net.cpp:226] scale2a_branch2c needs backward computation. +I0111 18:29:32.875969 12987 net.cpp:226] bn2a_branch2c needs backward computation. +I0111 18:29:32.875973 12987 net.cpp:226] res2a_branch2c needs backward computation. +I0111 18:29:32.875977 12987 net.cpp:226] res2a_branch2b_relu needs backward computation. +I0111 18:29:32.875980 12987 net.cpp:226] scale2a_branch2b needs backward computation. +I0111 18:29:32.875983 12987 net.cpp:226] bn2a_branch2b needs backward computation. +I0111 18:29:32.875986 12987 net.cpp:226] res2a_branch2b needs backward computation. +I0111 18:29:32.875990 12987 net.cpp:226] res2a_branch2a_relu needs backward computation. +I0111 18:29:32.875993 12987 net.cpp:226] scale2a_branch2a needs backward computation. +I0111 18:29:32.875996 12987 net.cpp:226] bn2a_branch2a needs backward computation. +I0111 18:29:32.876000 12987 net.cpp:226] res2a_branch2a needs backward computation. +I0111 18:29:32.876004 12987 net.cpp:226] scale2a_branch1 needs backward computation. +I0111 18:29:32.876008 12987 net.cpp:226] bn2a_branch1 needs backward computation. +I0111 18:29:32.876010 12987 net.cpp:226] res2a_branch1 needs backward computation. +I0111 18:29:32.876014 12987 net.cpp:226] pool1_pool1_0_split needs backward computation. +I0111 18:29:32.876019 12987 net.cpp:226] pool1 needs backward computation. +I0111 18:29:32.876025 12987 net.cpp:226] conv1_relu needs backward computation. +I0111 18:29:32.876029 12987 net.cpp:226] scale_conv1 needs backward computation. +I0111 18:29:32.876032 12987 net.cpp:226] bn_conv1 needs backward computation. +I0111 18:29:32.876035 12987 net.cpp:226] conv1 needs backward computation. +I0111 18:29:32.876040 12987 net.cpp:228] label_resnet_32_1_split does not need backward computation. +I0111 18:29:32.876044 12987 net.cpp:228] resnet_32 does not need backward computation. +I0111 18:29:32.876047 12987 net.cpp:270] This network produces output accuracy@1 +I0111 18:29:32.876051 12987 net.cpp:270] This network produces output accuracy@5 +I0111 18:29:32.876055 12987 net.cpp:270] This network produces output prob +I0111 18:29:32.876168 12987 net.cpp:283] Network initialization done. +I0111 18:29:32.876629 12987 solver.cpp:60] Solver scaffolding done. 
+I0111 18:29:32.928978 12987 parallel.cpp:392] GPUs pairs 0:1, 2:3, 0:2 +I0111 18:29:33.431982 12987 data_layer.cpp:41] output data size: 32,3,224,224 +I0111 18:29:35.065269 12987 data_layer.cpp:41] output data size: 32,3,224,224 +I0111 18:29:36.886327 12987 data_layer.cpp:41] output data size: 32,3,224,224 +I0111 18:29:38.056330 12987 parallel.cpp:425] Starting Optimization +I0111 18:29:38.056934 12987 solver.cpp:279] Solving ResNet-32 +I0111 18:29:38.056944 12987 solver.cpp:280] Learning Rate Policy: step +I0111 18:29:39.714201 12987 solver.cpp:228] Iteration 0, loss = 7.16275 +I0111 18:29:39.714249 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 18:29:39.714262 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.03125 +I0111 18:29:39.714277 12987 solver.cpp:244] Train net output #2: prob = 7.16275 (* 1 = 7.16275 loss) +I0111 18:29:39.714366 12987 sgd_solver.cpp:106] Iteration 0, lr = 0.1 +I0111 18:38:27.658589 12987 solver.cpp:228] Iteration 1000, loss = 6.92136 +I0111 18:38:27.658850 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 18:38:27.658881 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.03125 +I0111 18:38:27.658900 12987 solver.cpp:244] Train net output #2: prob = 6.91111 (* 1 = 6.91111 loss) +I0111 18:38:27.914134 12987 sgd_solver.cpp:106] Iteration 1000, lr = 0.1 +I0111 18:47:15.663400 12987 solver.cpp:228] Iteration 2000, loss = 6.86752 +I0111 18:47:15.663692 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.03125 +I0111 18:47:15.663743 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.03125 +I0111 18:47:15.663774 12987 solver.cpp:244] Train net output #2: prob = 6.66696 (* 1 = 6.66696 loss) +I0111 18:47:15.916774 12987 sgd_solver.cpp:106] Iteration 2000, lr = 0.1 +I0111 18:56:03.781859 12987 solver.cpp:228] Iteration 3000, loss = 6.59609 +I0111 18:56:03.782095 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 18:56:03.782125 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.0625 +I0111 18:56:03.782138 12987 solver.cpp:244] Train net output #2: prob = 6.14411 (* 1 = 6.14411 loss) +I0111 18:56:04.035096 12987 sgd_solver.cpp:106] Iteration 3000, lr = 0.1 +I0111 19:04:52.986184 12987 solver.cpp:228] Iteration 4000, loss = 6.17589 +I0111 19:04:52.986436 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 19:04:52.986474 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.0625 +I0111 19:04:52.986495 12987 solver.cpp:244] Train net output #2: prob = 6.0016 (* 1 = 6.0016 loss) +I0111 19:04:53.241822 12987 sgd_solver.cpp:106] Iteration 4000, lr = 0.1 +I0111 19:13:43.128335 12987 solver.cpp:228] Iteration 5000, loss = 5.66448 +I0111 19:13:43.128599 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 19:13:43.128638 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.15625 +I0111 19:13:43.128657 12987 solver.cpp:244] Train net output #2: prob = 5.327 (* 1 = 5.327 loss) +I0111 19:13:43.385567 12987 sgd_solver.cpp:106] Iteration 5000, lr = 0.1 +I0111 19:22:33.616711 12987 solver.cpp:228] Iteration 6000, loss = 5.25595 +I0111 19:22:33.616904 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0 +I0111 19:22:33.616914 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.21875 +I0111 19:22:33.616925 12987 solver.cpp:244] Train net output #2: prob = 5.14343 (* 1 = 5.14343 loss) +I0111 19:22:33.871553 12987 sgd_solver.cpp:106] Iteration 6000, lr = 0.1 +I0111 19:31:23.417388 12987 solver.cpp:228] Iteration 7000, loss = 4.90125 +I0111 19:31:23.417609 12987 
solver.cpp:244] Train net output #0: accuracy@1 = 0.125 +I0111 19:31:23.417636 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.28125 +I0111 19:31:23.417654 12987 solver.cpp:244] Train net output #2: prob = 4.66355 (* 1 = 4.66355 loss) +I0111 19:31:23.671221 12987 sgd_solver.cpp:106] Iteration 7000, lr = 0.1 +I0111 19:40:10.012048 12987 solver.cpp:228] Iteration 8000, loss = 4.60608 +I0111 19:40:10.012275 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.28125 +I0111 19:40:10.012307 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.375 +I0111 19:40:10.012320 12987 solver.cpp:244] Train net output #2: prob = 4.37026 (* 1 = 4.37026 loss) +I0111 19:40:10.268959 12987 sgd_solver.cpp:106] Iteration 8000, lr = 0.1 +I0111 19:48:56.801497 12987 solver.cpp:228] Iteration 9000, loss = 4.38591 +I0111 19:48:56.801759 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.1875 +I0111 19:48:56.801789 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.40625 +I0111 19:48:56.801805 12987 solver.cpp:244] Train net output #2: prob = 4.12003 (* 1 = 4.12003 loss) +I0111 19:48:57.054325 12987 sgd_solver.cpp:106] Iteration 9000, lr = 0.1 +I0111 19:57:43.622279 12987 solver.cpp:228] Iteration 10000, loss = 4.19026 +I0111 19:57:43.622488 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.28125 +I0111 19:57:43.622510 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.40625 +I0111 19:57:43.622525 12987 solver.cpp:244] Train net output #2: prob = 3.75889 (* 1 = 3.75889 loss) +I0111 19:57:43.862663 12987 sgd_solver.cpp:106] Iteration 10000, lr = 0.1 +I0111 20:06:30.539824 12987 solver.cpp:228] Iteration 11000, loss = 4.01061 +I0111 20:06:30.540050 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0111 20:06:30.540091 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.46875 +I0111 20:06:30.540112 12987 solver.cpp:244] Train net output #2: prob = 3.36438 (* 1 = 3.36438 loss) +I0111 20:06:30.790807 12987 sgd_solver.cpp:106] Iteration 11000, lr = 0.1 +I0111 20:15:17.882861 12987 solver.cpp:228] Iteration 12000, loss = 3.86194 +I0111 20:15:17.883100 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.21875 +I0111 20:15:17.883150 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.46875 +I0111 20:15:17.883167 12987 solver.cpp:244] Train net output #2: prob = 3.76239 (* 1 = 3.76239 loss) +I0111 20:15:18.134950 12987 sgd_solver.cpp:106] Iteration 12000, lr = 0.1 +I0111 20:24:05.511695 12987 solver.cpp:228] Iteration 13000, loss = 3.73196 +I0111 20:24:05.511935 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.21875 +I0111 20:24:05.511947 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5 +I0111 20:24:05.511958 12987 solver.cpp:244] Train net output #2: prob = 3.93543 (* 1 = 3.93543 loss) +I0111 20:24:05.762383 12987 sgd_solver.cpp:106] Iteration 13000, lr = 0.1 +I0111 20:32:53.014847 12987 solver.cpp:228] Iteration 14000, loss = 3.59563 +I0111 20:32:53.015038 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0111 20:32:53.015048 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5 +I0111 20:32:53.015058 12987 solver.cpp:244] Train net output #2: prob = 3.59397 (* 1 = 3.59397 loss) +I0111 20:32:53.269075 12987 sgd_solver.cpp:106] Iteration 14000, lr = 0.1 +I0111 20:41:39.920667 12987 solver.cpp:228] Iteration 15000, loss = 3.5052 +I0111 20:41:39.920881 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0111 20:41:39.920912 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0111 
20:41:39.920924 12987 solver.cpp:244] Train net output #2: prob = 3.3146 (* 1 = 3.3146 loss) +I0111 20:41:40.175020 12987 sgd_solver.cpp:106] Iteration 15000, lr = 0.1 +I0111 20:50:27.245685 12987 solver.cpp:228] Iteration 16000, loss = 3.41794 +I0111 20:50:27.245856 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0111 20:50:27.245865 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0111 20:50:27.245875 12987 solver.cpp:244] Train net output #2: prob = 3.24614 (* 1 = 3.24614 loss) +I0111 20:50:27.500335 12987 sgd_solver.cpp:106] Iteration 16000, lr = 0.1 +I0111 20:59:14.318156 12987 solver.cpp:228] Iteration 17000, loss = 3.33114 +I0111 20:59:14.318342 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0111 20:59:14.318353 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0111 20:59:14.318362 12987 solver.cpp:244] Train net output #2: prob = 2.56923 (* 1 = 2.56923 loss) +I0111 20:59:14.575078 12987 sgd_solver.cpp:106] Iteration 17000, lr = 0.1 +I0111 21:08:01.865661 12987 solver.cpp:228] Iteration 18000, loss = 3.26561 +I0111 21:08:01.865912 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0111 21:08:01.865944 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0111 21:08:01.865957 12987 solver.cpp:244] Train net output #2: prob = 2.75669 (* 1 = 2.75669 loss) +I0111 21:08:02.116483 12987 sgd_solver.cpp:106] Iteration 18000, lr = 0.1 +I0111 21:16:49.312795 12987 solver.cpp:228] Iteration 19000, loss = 3.20723 +I0111 21:16:49.332489 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.25 +I0111 21:16:49.332525 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0111 21:16:49.332543 12987 solver.cpp:244] Train net output #2: prob = 2.86923 (* 1 = 2.86923 loss) +I0111 21:16:49.562237 12987 sgd_solver.cpp:106] Iteration 19000, lr = 0.1 +I0111 21:25:36.838680 12987 solver.cpp:228] Iteration 20000, loss = 3.15304 +I0111 21:25:36.838933 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.25 +I0111 21:25:36.838971 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5 +I0111 21:25:36.838997 12987 solver.cpp:244] Train net output #2: prob = 3.7669 (* 1 = 3.7669 loss) +I0111 21:25:37.092036 12987 sgd_solver.cpp:106] Iteration 20000, lr = 0.1 +I0111 21:34:23.712949 12987 solver.cpp:228] Iteration 21000, loss = 3.08445 +I0111 21:34:23.713186 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0111 21:34:23.713229 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0111 21:34:23.713248 12987 solver.cpp:244] Train net output #2: prob = 3.16138 (* 1 = 3.16138 loss) +I0111 21:34:23.966631 12987 sgd_solver.cpp:106] Iteration 21000, lr = 0.1 +I0111 21:43:10.544476 12987 solver.cpp:228] Iteration 22000, loss = 3.03705 +I0111 21:43:10.544704 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.25 +I0111 21:43:10.544731 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0111 21:43:10.544742 12987 solver.cpp:244] Train net output #2: prob = 3.08747 (* 1 = 3.08747 loss) +I0111 21:43:10.800061 12987 sgd_solver.cpp:106] Iteration 22000, lr = 0.1 +I0111 21:51:56.917373 12987 solver.cpp:228] Iteration 23000, loss = 3.0083 +I0111 21:51:56.917556 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0111 21:51:56.917567 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0111 21:51:56.917577 12987 solver.cpp:244] Train net output #2: prob = 2.74993 (* 1 = 2.74993 loss) +I0111 21:51:57.170512 12987 sgd_solver.cpp:106] Iteration 
23000, lr = 0.1 +I0111 22:00:43.569506 12987 solver.cpp:228] Iteration 24000, loss = 2.97927 +I0111 22:00:43.569703 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0111 22:00:43.569715 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0111 22:00:43.569726 12987 solver.cpp:244] Train net output #2: prob = 3.08696 (* 1 = 3.08696 loss) +I0111 22:00:43.822981 12987 sgd_solver.cpp:106] Iteration 24000, lr = 0.1 +I0111 22:09:29.818006 12987 solver.cpp:228] Iteration 25000, loss = 2.9331 +I0111 22:09:29.818138 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0111 22:09:29.818148 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0111 22:09:29.818156 12987 solver.cpp:244] Train net output #2: prob = 3.02618 (* 1 = 3.02618 loss) +I0111 22:09:30.073252 12987 sgd_solver.cpp:106] Iteration 25000, lr = 0.1 +I0111 22:18:16.370966 12987 solver.cpp:228] Iteration 26000, loss = 2.90196 +I0111 22:18:16.371188 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0111 22:18:16.371201 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5625 +I0111 22:18:16.371213 12987 solver.cpp:244] Train net output #2: prob = 3.21202 (* 1 = 3.21202 loss) +I0111 22:18:16.625027 12987 sgd_solver.cpp:106] Iteration 26000, lr = 0.1 +I0111 22:27:03.348036 12987 solver.cpp:228] Iteration 27000, loss = 2.84302 +I0111 22:27:03.348222 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0111 22:27:03.348232 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0111 22:27:03.348242 12987 solver.cpp:244] Train net output #2: prob = 2.37112 (* 1 = 2.37112 loss) +I0111 22:27:03.600713 12987 sgd_solver.cpp:106] Iteration 27000, lr = 0.1 +I0111 22:35:50.380149 12987 solver.cpp:228] Iteration 28000, loss = 2.83374 +I0111 22:35:50.380396 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0111 22:35:50.380434 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0111 22:35:50.380452 12987 solver.cpp:244] Train net output #2: prob = 2.83858 (* 1 = 2.83858 loss) +I0111 22:35:50.629467 12987 sgd_solver.cpp:106] Iteration 28000, lr = 0.1 +I0111 22:44:36.584630 12987 solver.cpp:228] Iteration 29000, loss = 2.81504 +I0111 22:44:36.584838 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0111 22:44:36.584874 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0111 22:44:36.584892 12987 solver.cpp:244] Train net output #2: prob = 2.48635 (* 1 = 2.48635 loss) +I0111 22:44:36.836282 12987 sgd_solver.cpp:106] Iteration 29000, lr = 0.1 +I0111 22:53:23.063220 12987 solver.cpp:228] Iteration 30000, loss = 2.7888 +I0111 22:53:23.063511 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0111 22:53:23.063532 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0111 22:53:23.063549 12987 solver.cpp:244] Train net output #2: prob = 2.78947 (* 1 = 2.78947 loss) +I0111 22:53:23.313982 12987 sgd_solver.cpp:106] Iteration 30000, lr = 0.1 +I0111 23:02:09.739822 12987 solver.cpp:228] Iteration 31000, loss = 2.75375 +I0111 23:02:09.740084 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0111 23:02:09.740113 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0111 23:02:09.740130 12987 solver.cpp:244] Train net output #2: prob = 2.08175 (* 1 = 2.08175 loss) +I0111 23:02:09.993533 12987 sgd_solver.cpp:106] Iteration 31000, lr = 0.1 +I0111 23:10:56.100742 12987 solver.cpp:228] Iteration 32000, loss = 2.75111 +I0111 23:10:56.101023 12987 solver.cpp:244] Train net 
output #0: accuracy@1 = 0.3125 +I0111 23:10:56.101055 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0111 23:10:56.101078 12987 solver.cpp:244] Train net output #2: prob = 2.83846 (* 1 = 2.83846 loss) +I0111 23:10:56.350932 12987 sgd_solver.cpp:106] Iteration 32000, lr = 0.1 +I0111 23:19:42.821478 12987 solver.cpp:228] Iteration 33000, loss = 2.74164 +I0111 23:19:42.821729 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0111 23:19:42.821764 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0111 23:19:42.821781 12987 solver.cpp:244] Train net output #2: prob = 2.0737 (* 1 = 2.0737 loss) +I0111 23:19:43.075218 12987 sgd_solver.cpp:106] Iteration 33000, lr = 0.1 +I0111 23:28:29.005448 12987 solver.cpp:228] Iteration 34000, loss = 2.70784 +I0111 23:28:29.005684 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0111 23:28:29.005702 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0111 23:28:29.005717 12987 solver.cpp:244] Train net output #2: prob = 3.14256 (* 1 = 3.14256 loss) +I0111 23:28:29.257369 12987 sgd_solver.cpp:106] Iteration 34000, lr = 0.1 +I0111 23:37:15.423384 12987 solver.cpp:228] Iteration 35000, loss = 2.69462 +I0111 23:37:15.423621 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0111 23:37:15.423633 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0111 23:37:15.423643 12987 solver.cpp:244] Train net output #2: prob = 2.28936 (* 1 = 2.28936 loss) +I0111 23:37:15.679517 12987 sgd_solver.cpp:106] Iteration 35000, lr = 0.1 +I0111 23:46:01.967138 12987 solver.cpp:228] Iteration 36000, loss = 2.67004 +I0111 23:46:01.967325 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0111 23:46:01.967339 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0111 23:46:01.967348 12987 solver.cpp:244] Train net output #2: prob = 2.0663 (* 1 = 2.0663 loss) +I0111 23:46:02.218075 12987 sgd_solver.cpp:106] Iteration 36000, lr = 0.1 +I0111 23:54:49.237290 12987 solver.cpp:228] Iteration 37000, loss = 2.64138 +I0111 23:54:49.237547 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0111 23:54:49.237576 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0111 23:54:49.237598 12987 solver.cpp:244] Train net output #2: prob = 3.21919 (* 1 = 3.21919 loss) +I0111 23:54:49.495002 12987 sgd_solver.cpp:106] Iteration 37000, lr = 0.1 +I0112 00:03:36.399197 12987 solver.cpp:228] Iteration 38000, loss = 2.6393 +I0112 00:03:36.399456 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 00:03:36.399497 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 00:03:36.399515 12987 solver.cpp:244] Train net output #2: prob = 2.36118 (* 1 = 2.36118 loss) +I0112 00:03:36.648574 12987 sgd_solver.cpp:106] Iteration 38000, lr = 0.1 +I0112 00:12:22.891856 12987 solver.cpp:228] Iteration 39000, loss = 2.61336 +I0112 00:12:22.892091 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 00:12:22.892123 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 00:12:22.892134 12987 solver.cpp:244] Train net output #2: prob = 2.49797 (* 1 = 2.49797 loss) +I0112 00:12:23.144495 12987 sgd_solver.cpp:106] Iteration 39000, lr = 0.1 +I0112 00:21:09.828768 12987 solver.cpp:228] Iteration 40000, loss = 2.62 +I0112 00:21:09.828948 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 00:21:09.828958 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 00:21:09.828969 12987 
solver.cpp:244] Train net output #2: prob = 2.24159 (* 1 = 2.24159 loss) +I0112 00:21:10.083019 12987 sgd_solver.cpp:106] Iteration 40000, lr = 0.1 +I0112 00:29:57.001981 12987 solver.cpp:228] Iteration 41000, loss = 2.58132 +I0112 00:29:57.002207 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 00:29:57.002218 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0112 00:29:57.002229 12987 solver.cpp:244] Train net output #2: prob = 3.08441 (* 1 = 3.08441 loss) +I0112 00:29:57.253648 12987 sgd_solver.cpp:106] Iteration 41000, lr = 0.1 +I0112 00:38:44.055434 12987 solver.cpp:228] Iteration 42000, loss = 2.55624 +I0112 00:38:44.055636 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 00:38:44.055665 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 00:38:44.055676 12987 solver.cpp:244] Train net output #2: prob = 2.0464 (* 1 = 2.0464 loss) +I0112 00:38:44.309173 12987 sgd_solver.cpp:106] Iteration 42000, lr = 0.1 +I0112 00:47:30.889477 12987 solver.cpp:228] Iteration 43000, loss = 2.55872 +I0112 00:47:30.889678 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 00:47:30.889716 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 00:47:30.889734 12987 solver.cpp:244] Train net output #2: prob = 2.41295 (* 1 = 2.41295 loss) +I0112 00:47:31.139389 12987 sgd_solver.cpp:106] Iteration 43000, lr = 0.1 +I0112 00:56:17.938971 12987 solver.cpp:228] Iteration 44000, loss = 2.55697 +I0112 00:56:17.939206 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 00:56:17.939224 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 00:56:17.939236 12987 solver.cpp:244] Train net output #2: prob = 1.78876 (* 1 = 1.78876 loss) +I0112 00:56:18.189370 12987 sgd_solver.cpp:106] Iteration 44000, lr = 0.1 +I0112 01:05:05.381125 12987 solver.cpp:228] Iteration 45000, loss = 2.525 +I0112 01:05:05.381301 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 01:05:05.381312 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 01:05:05.381323 12987 solver.cpp:244] Train net output #2: prob = 2.45816 (* 1 = 2.45816 loss) +I0112 01:05:05.634081 12987 sgd_solver.cpp:106] Iteration 45000, lr = 0.1 +I0112 01:13:52.777253 12987 solver.cpp:228] Iteration 46000, loss = 2.51965 +I0112 01:13:52.777590 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 01:13:52.777601 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 01:13:52.777611 12987 solver.cpp:244] Train net output #2: prob = 2.59114 (* 1 = 2.59114 loss) +I0112 01:13:53.029285 12987 sgd_solver.cpp:106] Iteration 46000, lr = 0.1 +I0112 01:22:40.087390 12987 solver.cpp:228] Iteration 47000, loss = 2.51574 +I0112 01:22:40.087636 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 01:22:40.087669 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5625 +I0112 01:22:40.087685 12987 solver.cpp:244] Train net output #2: prob = 3.42049 (* 1 = 3.42049 loss) +I0112 01:22:40.342576 12987 sgd_solver.cpp:106] Iteration 47000, lr = 0.1 +I0112 01:31:27.564272 12987 solver.cpp:228] Iteration 48000, loss = 2.51063 +I0112 01:31:27.564494 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0112 01:31:27.564537 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 01:31:27.564566 12987 solver.cpp:244] Train net output #2: prob = 2.66241 (* 1 = 2.66241 loss) +I0112 01:31:27.819437 12987 sgd_solver.cpp:106] Iteration 48000, lr = 0.1 
+I0112 01:40:14.322770 12987 solver.cpp:228] Iteration 49000, loss = 2.4983 +I0112 01:40:14.322978 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 01:40:14.323014 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 01:40:14.323026 12987 solver.cpp:244] Train net output #2: prob = 2.8593 (* 1 = 2.8593 loss) +I0112 01:40:14.576745 12987 sgd_solver.cpp:106] Iteration 49000, lr = 0.1 +I0112 01:49:01.595684 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_50000.caffemodel +I0112 01:49:03.072983 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_50000.solverstate +I0112 01:49:03.557168 12987 solver.cpp:228] Iteration 50000, loss = 2.50871 +I0112 01:49:03.557215 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 01:49:03.557224 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 01:49:03.557232 12987 solver.cpp:244] Train net output #2: prob = 2.63867 (* 1 = 2.63867 loss) +I0112 01:49:03.815968 12987 sgd_solver.cpp:106] Iteration 50000, lr = 0.1 +I0112 01:57:50.816953 12987 solver.cpp:228] Iteration 51000, loss = 2.47818 +I0112 01:57:50.817170 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 01:57:50.817203 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 01:57:50.817221 12987 solver.cpp:244] Train net output #2: prob = 2.26192 (* 1 = 2.26192 loss) +I0112 01:57:51.061949 12987 sgd_solver.cpp:106] Iteration 51000, lr = 0.1 +I0112 02:06:38.034086 12987 solver.cpp:228] Iteration 52000, loss = 2.47026 +I0112 02:06:38.034379 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 02:06:38.034416 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 02:06:38.034437 12987 solver.cpp:244] Train net output #2: prob = 2.26943 (* 1 = 2.26943 loss) +I0112 02:06:38.280436 12987 sgd_solver.cpp:106] Iteration 52000, lr = 0.1 +I0112 02:15:25.201050 12987 solver.cpp:228] Iteration 53000, loss = 2.45841 +I0112 02:15:25.201210 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 02:15:25.201220 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 02:15:25.201231 12987 solver.cpp:244] Train net output #2: prob = 1.9571 (* 1 = 1.9571 loss) +I0112 02:15:25.453336 12987 sgd_solver.cpp:106] Iteration 53000, lr = 0.1 +I0112 02:24:12.468830 12987 solver.cpp:228] Iteration 54000, loss = 2.46039 +I0112 02:24:12.469085 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 02:24:12.469126 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 02:24:12.469146 12987 solver.cpp:244] Train net output #2: prob = 2.86174 (* 1 = 2.86174 loss) +I0112 02:24:12.723803 12987 sgd_solver.cpp:106] Iteration 54000, lr = 0.1 +I0112 02:32:59.670706 12987 solver.cpp:228] Iteration 55000, loss = 2.45288 +I0112 02:32:59.670917 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 02:32:59.670939 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 02:32:59.670956 12987 solver.cpp:244] Train net output #2: prob = 2.45733 (* 1 = 2.45733 loss) +I0112 02:32:59.922922 12987 sgd_solver.cpp:106] Iteration 55000, lr = 0.1 +I0112 02:41:46.904016 12987 solver.cpp:228] Iteration 56000, loss = 2.45987 +I0112 02:41:46.904307 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 02:41:46.904345 12987 solver.cpp:244] Train net 
output #1: accuracy@5 = 0.65625 +I0112 02:41:46.904363 12987 solver.cpp:244] Train net output #2: prob = 2.3477 (* 1 = 2.3477 loss) +I0112 02:41:47.156303 12987 sgd_solver.cpp:106] Iteration 56000, lr = 0.1 +I0112 02:50:34.215164 12987 solver.cpp:228] Iteration 57000, loss = 2.44351 +I0112 02:50:34.216712 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 02:50:34.216724 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 02:50:34.216734 12987 solver.cpp:244] Train net output #2: prob = 2.41596 (* 1 = 2.41596 loss) +I0112 02:50:34.470496 12987 sgd_solver.cpp:106] Iteration 57000, lr = 0.1 +I0112 02:59:21.567826 12987 solver.cpp:228] Iteration 58000, loss = 2.4377 +I0112 02:59:21.568054 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 02:59:21.568095 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 02:59:21.568119 12987 solver.cpp:244] Train net output #2: prob = 2.17313 (* 1 = 2.17313 loss) +I0112 02:59:21.822067 12987 sgd_solver.cpp:106] Iteration 58000, lr = 0.1 +I0112 03:08:08.938733 12987 solver.cpp:228] Iteration 59000, loss = 2.44681 +I0112 03:08:08.938984 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 03:08:08.939033 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 03:08:08.939056 12987 solver.cpp:244] Train net output #2: prob = 2.46513 (* 1 = 2.46513 loss) +I0112 03:08:09.194520 12987 sgd_solver.cpp:106] Iteration 59000, lr = 0.1 +I0112 03:16:55.732755 12987 solver.cpp:228] Iteration 60000, loss = 2.41976 +I0112 03:16:55.732961 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 03:16:55.732973 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 03:16:55.732983 12987 solver.cpp:244] Train net output #2: prob = 2.21423 (* 1 = 2.21423 loss) +I0112 03:16:55.984216 12987 sgd_solver.cpp:106] Iteration 60000, lr = 0.1 +I0112 03:25:43.128953 12987 solver.cpp:228] Iteration 61000, loss = 2.42951 +I0112 03:25:43.129179 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 03:25:43.129209 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 03:25:43.129223 12987 solver.cpp:244] Train net output #2: prob = 2.68649 (* 1 = 2.68649 loss) +I0112 03:25:43.382045 12987 sgd_solver.cpp:106] Iteration 61000, lr = 0.1 +I0112 03:34:30.236553 12987 solver.cpp:228] Iteration 62000, loss = 2.40596 +I0112 03:34:30.236698 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0112 03:34:30.236708 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 03:34:30.236717 12987 solver.cpp:244] Train net output #2: prob = 3.00206 (* 1 = 3.00206 loss) +I0112 03:34:30.487027 12987 sgd_solver.cpp:106] Iteration 62000, lr = 0.1 +I0112 03:43:17.701647 12987 solver.cpp:228] Iteration 63000, loss = 2.4052 +I0112 03:43:17.702433 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 03:43:17.702450 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 03:43:17.702461 12987 solver.cpp:244] Train net output #2: prob = 2.48928 (* 1 = 2.48928 loss) +I0112 03:43:17.953034 12987 sgd_solver.cpp:106] Iteration 63000, lr = 0.1 +I0112 03:52:04.590734 12987 solver.cpp:228] Iteration 64000, loss = 2.41014 +I0112 03:52:04.591063 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 03:52:04.591091 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 03:52:04.591102 12987 solver.cpp:244] Train net output #2: prob = 2.99285 (* 1 = 2.99285 loss) +I0112 
03:52:04.843086 12987 sgd_solver.cpp:106] Iteration 64000, lr = 0.1 +I0112 04:00:51.375036 12987 solver.cpp:228] Iteration 65000, loss = 2.41511 +I0112 04:00:51.375267 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 04:00:51.375304 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 04:00:51.375322 12987 solver.cpp:244] Train net output #2: prob = 1.94029 (* 1 = 1.94029 loss) +I0112 04:00:51.624764 12987 sgd_solver.cpp:106] Iteration 65000, lr = 0.1 +I0112 04:09:38.132009 12987 solver.cpp:228] Iteration 66000, loss = 2.39274 +I0112 04:09:38.132205 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 04:09:38.132215 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 04:09:38.132226 12987 solver.cpp:244] Train net output #2: prob = 2.30199 (* 1 = 2.30199 loss) +I0112 04:09:38.386740 12987 sgd_solver.cpp:106] Iteration 66000, lr = 0.1 +I0112 04:18:25.448900 12987 solver.cpp:228] Iteration 67000, loss = 2.38338 +I0112 04:18:25.449163 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 04:18:25.449200 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 04:18:25.449223 12987 solver.cpp:244] Train net output #2: prob = 2.79311 (* 1 = 2.79311 loss) +I0112 04:18:25.693289 12987 sgd_solver.cpp:106] Iteration 67000, lr = 0.1 +I0112 04:27:11.757225 12987 solver.cpp:228] Iteration 68000, loss = 2.3978 +I0112 04:27:11.757434 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 04:27:11.757462 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 04:27:11.757472 12987 solver.cpp:244] Train net output #2: prob = 2.54036 (* 1 = 2.54036 loss) +I0112 04:27:12.011216 12987 sgd_solver.cpp:106] Iteration 68000, lr = 0.1 +I0112 04:35:58.275741 12987 solver.cpp:228] Iteration 69000, loss = 2.38424 +I0112 04:35:58.275975 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0112 04:35:58.276008 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 04:35:58.276026 12987 solver.cpp:244] Train net output #2: prob = 2.59031 (* 1 = 2.59031 loss) +I0112 04:35:58.527078 12987 sgd_solver.cpp:106] Iteration 69000, lr = 0.1 +I0112 04:44:45.306143 12987 solver.cpp:228] Iteration 70000, loss = 2.39143 +I0112 04:44:45.306408 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 04:44:45.306448 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 04:44:45.306469 12987 solver.cpp:244] Train net output #2: prob = 2.31873 (* 1 = 2.31873 loss) +I0112 04:44:45.559789 12987 sgd_solver.cpp:106] Iteration 70000, lr = 0.1 +I0112 04:53:32.301234 12987 solver.cpp:228] Iteration 71000, loss = 2.37386 +I0112 04:53:32.301424 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 04:53:32.301441 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 04:53:32.301451 12987 solver.cpp:244] Train net output #2: prob = 2.45887 (* 1 = 2.45887 loss) +I0112 04:53:32.547807 12987 sgd_solver.cpp:106] Iteration 71000, lr = 0.1 +I0112 05:02:19.170213 12987 solver.cpp:228] Iteration 72000, loss = 2.38263 +I0112 05:02:19.170521 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 05:02:19.170562 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 05:02:19.170583 12987 solver.cpp:244] Train net output #2: prob = 2.2873 (* 1 = 2.2873 loss) +I0112 05:02:19.428246 12987 sgd_solver.cpp:106] Iteration 72000, lr = 0.1 +I0112 05:11:05.784584 12987 solver.cpp:228] Iteration 73000, loss = 2.39581 
+I0112 05:11:05.784878 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 05:11:05.784919 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 05:11:05.784937 12987 solver.cpp:244] Train net output #2: prob = 2.63134 (* 1 = 2.63134 loss) +I0112 05:11:06.040026 12987 sgd_solver.cpp:106] Iteration 73000, lr = 0.1 +I0112 05:19:52.561288 12987 solver.cpp:228] Iteration 74000, loss = 2.37317 +I0112 05:19:52.561568 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 05:19:52.561611 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 05:19:52.561630 12987 solver.cpp:244] Train net output #2: prob = 2.61377 (* 1 = 2.61377 loss) +I0112 05:19:52.806979 12987 sgd_solver.cpp:106] Iteration 74000, lr = 0.1 +I0112 05:28:41.154952 12987 solver.cpp:228] Iteration 75000, loss = 2.37243 +I0112 05:28:41.155133 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 05:28:41.155143 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 05:28:41.155153 12987 solver.cpp:244] Train net output #2: prob = 2.66701 (* 1 = 2.66701 loss) +I0112 05:28:41.433646 12987 sgd_solver.cpp:106] Iteration 75000, lr = 0.1 +I0112 05:37:28.189174 12987 solver.cpp:228] Iteration 76000, loss = 2.36024 +I0112 05:37:28.189393 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 05:37:28.189420 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 05:37:28.189438 12987 solver.cpp:244] Train net output #2: prob = 2.41907 (* 1 = 2.41907 loss) +I0112 05:37:28.442243 12987 sgd_solver.cpp:106] Iteration 76000, lr = 0.1 +I0112 05:46:14.072832 12987 solver.cpp:228] Iteration 77000, loss = 2.34981 +I0112 05:46:14.073041 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 05:46:14.073074 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 05:46:14.073092 12987 solver.cpp:244] Train net output #2: prob = 2.13221 (* 1 = 2.13221 loss) +I0112 05:46:14.325518 12987 sgd_solver.cpp:106] Iteration 77000, lr = 0.1 +I0112 05:55:00.477905 12987 solver.cpp:228] Iteration 78000, loss = 2.36606 +I0112 05:55:00.478163 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 05:55:00.478215 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 05:55:00.478235 12987 solver.cpp:244] Train net output #2: prob = 2.75565 (* 1 = 2.75565 loss) +I0112 05:55:00.731194 12987 sgd_solver.cpp:106] Iteration 78000, lr = 0.1 +I0112 06:03:46.346845 12987 solver.cpp:228] Iteration 79000, loss = 2.35587 +I0112 06:03:46.347082 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 06:03:46.347117 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 06:03:46.347136 12987 solver.cpp:244] Train net output #2: prob = 2.53716 (* 1 = 2.53716 loss) +I0112 06:03:46.591575 12987 sgd_solver.cpp:106] Iteration 79000, lr = 0.1 +I0112 06:12:32.098328 12987 solver.cpp:228] Iteration 80000, loss = 2.34672 +I0112 06:12:32.098562 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 06:12:32.098592 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 06:12:32.098610 12987 solver.cpp:244] Train net output #2: prob = 1.69251 (* 1 = 1.69251 loss) +I0112 06:12:32.350061 12987 sgd_solver.cpp:106] Iteration 80000, lr = 0.1 +I0112 06:21:17.823294 12987 solver.cpp:228] Iteration 81000, loss = 2.32745 +I0112 06:21:17.823508 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 06:21:17.823532 12987 solver.cpp:244] Train net output 
#1: accuracy@5 = 0.71875 +I0112 06:21:17.823544 12987 solver.cpp:244] Train net output #2: prob = 2.58986 (* 1 = 2.58986 loss) +I0112 06:21:18.077594 12987 sgd_solver.cpp:106] Iteration 81000, lr = 0.1 +I0112 06:30:03.407050 12987 solver.cpp:228] Iteration 82000, loss = 2.32677 +I0112 06:30:03.407260 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 06:30:03.407271 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 06:30:03.407284 12987 solver.cpp:244] Train net output #2: prob = 2.60217 (* 1 = 2.60217 loss) +I0112 06:30:03.661227 12987 sgd_solver.cpp:106] Iteration 82000, lr = 0.1 +I0112 06:38:48.895114 12987 solver.cpp:228] Iteration 83000, loss = 2.3247 +I0112 06:38:48.895323 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 06:38:48.895334 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 06:38:48.895345 12987 solver.cpp:244] Train net output #2: prob = 2.54089 (* 1 = 2.54089 loss) +I0112 06:38:49.146829 12987 sgd_solver.cpp:106] Iteration 83000, lr = 0.1 +I0112 06:47:34.552009 12987 solver.cpp:228] Iteration 84000, loss = 2.34438 +I0112 06:47:34.552266 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 06:47:34.552304 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 06:47:34.552323 12987 solver.cpp:244] Train net output #2: prob = 2.4799 (* 1 = 2.4799 loss) +I0112 06:47:34.801986 12987 sgd_solver.cpp:106] Iteration 84000, lr = 0.1 +I0112 06:56:20.791672 12987 solver.cpp:228] Iteration 85000, loss = 2.31224 +I0112 06:56:20.801103 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0112 06:56:20.801122 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 06:56:20.801132 12987 solver.cpp:244] Train net output #2: prob = 2.06111 (* 1 = 2.06111 loss) +I0112 06:56:21.045907 12987 sgd_solver.cpp:106] Iteration 85000, lr = 0.1 +I0112 07:05:06.779191 12987 solver.cpp:228] Iteration 86000, loss = 2.31973 +I0112 07:05:06.779373 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 07:05:06.779384 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 07:05:06.779394 12987 solver.cpp:244] Train net output #2: prob = 2.69454 (* 1 = 2.69454 loss) +I0112 07:05:07.031642 12987 sgd_solver.cpp:106] Iteration 86000, lr = 0.1 +I0112 07:13:53.448901 12987 solver.cpp:228] Iteration 87000, loss = 2.3169 +I0112 07:13:53.449126 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 07:13:53.449167 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 07:13:53.449185 12987 solver.cpp:244] Train net output #2: prob = 2.6642 (* 1 = 2.6642 loss) +I0112 07:13:53.702965 12987 sgd_solver.cpp:106] Iteration 87000, lr = 0.1 +I0112 07:22:39.754329 12987 solver.cpp:228] Iteration 88000, loss = 2.3234 +I0112 07:22:39.754601 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 07:22:39.754643 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 07:22:39.754678 12987 solver.cpp:244] Train net output #2: prob = 1.77427 (* 1 = 1.77427 loss) +I0112 07:22:40.006945 12987 sgd_solver.cpp:106] Iteration 88000, lr = 0.1 +I0112 07:31:26.373978 12987 solver.cpp:228] Iteration 89000, loss = 2.32533 +I0112 07:31:26.374236 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 07:31:26.374253 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 07:31:26.374269 12987 solver.cpp:244] Train net output #2: prob = 2.27541 (* 1 = 2.27541 loss) +I0112 07:31:26.618456 
12987 sgd_solver.cpp:106] Iteration 89000, lr = 0.1 +I0112 07:40:12.512712 12987 solver.cpp:228] Iteration 90000, loss = 2.32351 +I0112 07:40:12.512903 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 07:40:12.512914 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 07:40:12.512924 12987 solver.cpp:244] Train net output #2: prob = 2.24777 (* 1 = 2.24777 loss) +I0112 07:40:12.767663 12987 sgd_solver.cpp:106] Iteration 90000, lr = 0.1 +I0112 07:48:58.683662 12987 solver.cpp:228] Iteration 91000, loss = 2.30555 +I0112 07:48:58.684823 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 07:48:58.684834 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 07:48:58.684844 12987 solver.cpp:244] Train net output #2: prob = 2.33261 (* 1 = 2.33261 loss) +I0112 07:48:58.937081 12987 sgd_solver.cpp:106] Iteration 91000, lr = 0.1 +I0112 07:57:44.854409 12987 solver.cpp:228] Iteration 92000, loss = 2.30733 +I0112 07:57:44.854593 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 07:57:44.854601 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 07:57:44.854611 12987 solver.cpp:244] Train net output #2: prob = 2.69926 (* 1 = 2.69926 loss) +I0112 07:57:45.111721 12987 sgd_solver.cpp:106] Iteration 92000, lr = 0.1 +I0112 08:06:31.450410 12987 solver.cpp:228] Iteration 93000, loss = 2.30306 +I0112 08:06:31.450649 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0112 08:06:31.450685 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0112 08:06:31.450700 12987 solver.cpp:244] Train net output #2: prob = 2.967 (* 1 = 2.967 loss) +I0112 08:06:31.704851 12987 sgd_solver.cpp:106] Iteration 93000, lr = 0.1 +I0112 08:15:17.747375 12987 solver.cpp:228] Iteration 94000, loss = 2.31243 +I0112 08:15:17.747593 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 08:15:17.747617 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 08:15:17.747627 12987 solver.cpp:244] Train net output #2: prob = 2.38538 (* 1 = 2.38538 loss) +I0112 08:15:17.999441 12987 sgd_solver.cpp:106] Iteration 94000, lr = 0.1 +I0112 08:24:03.959964 12987 solver.cpp:228] Iteration 95000, loss = 2.31208 +I0112 08:24:03.960127 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 08:24:03.960139 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 08:24:03.960147 12987 solver.cpp:244] Train net output #2: prob = 1.89207 (* 1 = 1.89207 loss) +I0112 08:24:04.215617 12987 sgd_solver.cpp:106] Iteration 95000, lr = 0.1 +I0112 08:32:49.704453 12987 solver.cpp:228] Iteration 96000, loss = 2.31138 +I0112 08:32:49.704670 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 08:32:49.704708 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 08:32:49.704726 12987 solver.cpp:244] Train net output #2: prob = 2.93967 (* 1 = 2.93967 loss) +I0112 08:32:49.960471 12987 sgd_solver.cpp:106] Iteration 96000, lr = 0.1 +I0112 08:41:35.279557 12987 solver.cpp:228] Iteration 97000, loss = 2.30851 +I0112 08:41:35.279745 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 08:41:35.279762 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 08:41:35.279775 12987 solver.cpp:244] Train net output #2: prob = 2.10711 (* 1 = 2.10711 loss) +I0112 08:41:35.535351 12987 sgd_solver.cpp:106] Iteration 97000, lr = 0.1 +I0112 08:50:21.242177 12987 solver.cpp:228] Iteration 98000, loss = 2.29613 +I0112 
08:50:21.242411 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 08:50:21.242447 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 08:50:21.242465 12987 solver.cpp:244] Train net output #2: prob = 1.98759 (* 1 = 1.98759 loss) +I0112 08:50:21.494066 12987 sgd_solver.cpp:106] Iteration 98000, lr = 0.1 +I0112 08:59:07.136905 12987 solver.cpp:228] Iteration 99000, loss = 2.30875 +I0112 08:59:07.137156 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 08:59:07.137197 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 08:59:07.137214 12987 solver.cpp:244] Train net output #2: prob = 2.10271 (* 1 = 2.10271 loss) +I0112 08:59:07.387485 12987 sgd_solver.cpp:106] Iteration 99000, lr = 0.1 +I0112 09:07:53.528118 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_100000.caffemodel +I0112 09:07:55.102588 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_100000.solverstate +I0112 09:07:55.524188 12987 solver.cpp:228] Iteration 100000, loss = 2.29968 +I0112 09:07:55.524235 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 09:07:55.524242 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 09:07:55.524250 12987 solver.cpp:244] Train net output #2: prob = 2.21137 (* 1 = 2.21137 loss) +I0112 09:07:55.795903 12987 sgd_solver.cpp:106] Iteration 100000, lr = 0.1 +I0112 09:16:42.444166 12987 solver.cpp:228] Iteration 101000, loss = 2.3026 +I0112 09:16:42.444399 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 09:16:42.444438 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 09:16:42.444458 12987 solver.cpp:244] Train net output #2: prob = 2.35811 (* 1 = 2.35811 loss) +I0112 09:16:42.686473 12987 sgd_solver.cpp:106] Iteration 101000, lr = 0.1 +I0112 09:25:28.906234 12987 solver.cpp:228] Iteration 102000, loss = 2.29956 +I0112 09:25:28.906402 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 09:25:28.906419 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 09:25:28.906430 12987 solver.cpp:244] Train net output #2: prob = 1.92636 (* 1 = 1.92636 loss) +I0112 09:25:29.158692 12987 sgd_solver.cpp:106] Iteration 102000, lr = 0.1 +I0112 09:34:15.349766 12987 solver.cpp:228] Iteration 103000, loss = 2.29405 +I0112 09:34:15.349980 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 09:34:15.350014 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 09:34:15.350033 12987 solver.cpp:244] Train net output #2: prob = 2.98125 (* 1 = 2.98125 loss) +I0112 09:34:15.601759 12987 sgd_solver.cpp:106] Iteration 103000, lr = 0.1 +I0112 09:43:02.016628 12987 solver.cpp:228] Iteration 104000, loss = 2.29632 +I0112 09:43:02.016870 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 09:43:02.016908 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 09:43:02.016932 12987 solver.cpp:244] Train net output #2: prob = 2.53534 (* 1 = 2.53534 loss) +I0112 09:43:02.271571 12987 sgd_solver.cpp:106] Iteration 104000, lr = 0.1 +I0112 09:51:48.247961 12987 solver.cpp:228] Iteration 105000, loss = 2.30426 +I0112 09:51:48.248239 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 09:51:48.248273 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 09:51:48.248291 12987 
solver.cpp:244] Train net output #2: prob = 2.37188 (* 1 = 2.37188 loss) +I0112 09:51:48.497439 12987 sgd_solver.cpp:106] Iteration 105000, lr = 0.1 +I0112 10:00:34.800245 12987 solver.cpp:228] Iteration 106000, loss = 2.28877 +I0112 10:00:34.800537 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 10:00:34.800588 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 10:00:34.800621 12987 solver.cpp:244] Train net output #2: prob = 2.63446 (* 1 = 2.63446 loss) +I0112 10:00:35.040653 12987 sgd_solver.cpp:106] Iteration 106000, lr = 0.1 +I0112 10:09:21.132880 12987 solver.cpp:228] Iteration 107000, loss = 2.26385 +I0112 10:09:21.133105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 10:09:21.133136 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 10:09:21.133154 12987 solver.cpp:244] Train net output #2: prob = 2.01063 (* 1 = 2.01063 loss) +I0112 10:09:21.383071 12987 sgd_solver.cpp:106] Iteration 107000, lr = 0.1 +I0112 10:18:07.344321 12987 solver.cpp:228] Iteration 108000, loss = 2.29071 +I0112 10:18:07.344490 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 10:18:07.344501 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 10:18:07.344509 12987 solver.cpp:244] Train net output #2: prob = 2.34783 (* 1 = 2.34783 loss) +I0112 10:18:07.599982 12987 sgd_solver.cpp:106] Iteration 108000, lr = 0.1 +I0112 10:26:54.076910 12987 solver.cpp:228] Iteration 109000, loss = 2.28971 +I0112 10:26:54.077133 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 10:26:54.077165 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 10:26:54.077180 12987 solver.cpp:244] Train net output #2: prob = 2.26378 (* 1 = 2.26378 loss) +I0112 10:26:54.331396 12987 sgd_solver.cpp:106] Iteration 109000, lr = 0.1 +I0112 10:35:40.151172 12987 solver.cpp:228] Iteration 110000, loss = 2.3018 +I0112 10:35:40.151453 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 10:35:40.151492 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 10:35:40.151510 12987 solver.cpp:244] Train net output #2: prob = 1.63381 (* 1 = 1.63381 loss) +I0112 10:35:40.402863 12987 sgd_solver.cpp:106] Iteration 110000, lr = 0.1 +I0112 10:44:26.393713 12987 solver.cpp:228] Iteration 111000, loss = 2.28108 +I0112 10:44:26.393947 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 10:44:26.393985 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 10:44:26.394001 12987 solver.cpp:244] Train net output #2: prob = 1.80065 (* 1 = 1.80065 loss) +I0112 10:44:26.646034 12987 sgd_solver.cpp:106] Iteration 111000, lr = 0.1 +I0112 10:53:12.485764 12987 solver.cpp:228] Iteration 112000, loss = 2.29112 +I0112 10:53:12.486045 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 10:53:12.486063 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 10:53:12.486079 12987 solver.cpp:244] Train net output #2: prob = 2.36074 (* 1 = 2.36074 loss) +I0112 10:53:12.741686 12987 sgd_solver.cpp:106] Iteration 112000, lr = 0.1 +I0112 11:01:58.504647 12987 solver.cpp:228] Iteration 113000, loss = 2.29807 +I0112 11:01:58.504844 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 11:01:58.504855 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 11:01:58.504865 12987 solver.cpp:244] Train net output #2: prob = 2.45964 (* 1 = 2.45964 loss) +I0112 11:01:58.757604 12987 sgd_solver.cpp:106] 
Iteration 113000, lr = 0.1 +I0112 11:10:45.016729 12987 solver.cpp:228] Iteration 114000, loss = 2.29833 +I0112 11:10:45.016914 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 11:10:45.016926 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 11:10:45.016935 12987 solver.cpp:244] Train net output #2: prob = 2.169 (* 1 = 2.169 loss) +I0112 11:10:45.267233 12987 sgd_solver.cpp:106] Iteration 114000, lr = 0.1 +I0112 11:19:30.758242 12987 solver.cpp:228] Iteration 115000, loss = 2.30278 +I0112 11:19:30.758468 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0112 11:19:30.758496 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 11:19:30.758512 12987 solver.cpp:244] Train net output #2: prob = 2.18401 (* 1 = 2.18401 loss) +I0112 11:19:31.009511 12987 sgd_solver.cpp:106] Iteration 115000, lr = 0.1 +I0112 11:28:16.882134 12987 solver.cpp:228] Iteration 116000, loss = 2.28755 +I0112 11:28:16.882417 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 11:28:16.882469 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 11:28:16.882490 12987 solver.cpp:244] Train net output #2: prob = 3.25121 (* 1 = 3.25121 loss) +I0112 11:28:17.138654 12987 sgd_solver.cpp:106] Iteration 116000, lr = 0.1 +I0112 11:37:03.117038 12987 solver.cpp:228] Iteration 117000, loss = 2.27495 +I0112 11:37:03.117257 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 11:37:03.117272 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 11:37:03.117286 12987 solver.cpp:244] Train net output #2: prob = 2.44668 (* 1 = 2.44668 loss) +I0112 11:37:03.373585 12987 sgd_solver.cpp:106] Iteration 117000, lr = 0.1 +I0112 11:45:49.270014 12987 solver.cpp:228] Iteration 118000, loss = 2.28657 +I0112 11:45:49.270216 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 11:45:49.270244 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 11:45:49.270256 12987 solver.cpp:244] Train net output #2: prob = 2.64605 (* 1 = 2.64605 loss) +I0112 11:45:49.523656 12987 sgd_solver.cpp:106] Iteration 118000, lr = 0.1 +I0112 11:54:35.436125 12987 solver.cpp:228] Iteration 119000, loss = 2.28194 +I0112 11:54:35.436338 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.28125 +I0112 11:54:35.436357 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 11:54:35.436373 12987 solver.cpp:244] Train net output #2: prob = 3.10069 (* 1 = 3.10069 loss) +I0112 11:54:35.689260 12987 sgd_solver.cpp:106] Iteration 119000, lr = 0.1 +I0112 12:03:22.148850 12987 solver.cpp:228] Iteration 120000, loss = 2.27154 +I0112 12:03:22.149094 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 12:03:22.149125 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 12:03:22.149137 12987 solver.cpp:244] Train net output #2: prob = 1.89242 (* 1 = 1.89242 loss) +I0112 12:03:22.405689 12987 sgd_solver.cpp:106] Iteration 120000, lr = 0.1 +I0112 12:12:08.982674 12987 solver.cpp:228] Iteration 121000, loss = 2.27044 +I0112 12:12:08.982892 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 12:12:08.982918 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 12:12:08.982933 12987 solver.cpp:244] Train net output #2: prob = 2.16504 (* 1 = 2.16504 loss) +I0112 12:12:09.235532 12987 sgd_solver.cpp:106] Iteration 121000, lr = 0.1 +I0112 12:20:55.295084 12987 solver.cpp:228] Iteration 122000, loss = 2.25647 +I0112 12:20:55.295343 12987 
solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0112 12:20:55.295377 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 12:20:55.295394 12987 solver.cpp:244] Train net output #2: prob = 2.23937 (* 1 = 2.23937 loss) +I0112 12:20:55.551671 12987 sgd_solver.cpp:106] Iteration 122000, lr = 0.1 +I0112 12:29:41.467645 12987 solver.cpp:228] Iteration 123000, loss = 2.27738 +I0112 12:29:41.467813 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 12:29:41.467824 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 12:29:41.467834 12987 solver.cpp:244] Train net output #2: prob = 2.09427 (* 1 = 2.09427 loss) +I0112 12:29:41.721031 12987 sgd_solver.cpp:106] Iteration 123000, lr = 0.1 +I0112 12:38:27.696090 12987 solver.cpp:228] Iteration 124000, loss = 2.27655 +I0112 12:38:27.696342 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 12:38:27.696389 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 12:38:27.696410 12987 solver.cpp:244] Train net output #2: prob = 1.99143 (* 1 = 1.99143 loss) +I0112 12:38:27.951513 12987 sgd_solver.cpp:106] Iteration 124000, lr = 0.1 +I0112 12:47:14.280184 12987 solver.cpp:228] Iteration 125000, loss = 2.25132 +I0112 12:47:14.280417 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 12:47:14.280454 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 12:47:14.280472 12987 solver.cpp:244] Train net output #2: prob = 2.4414 (* 1 = 2.4414 loss) +I0112 12:47:14.534373 12987 sgd_solver.cpp:106] Iteration 125000, lr = 0.1 +I0112 12:56:00.969074 12987 solver.cpp:228] Iteration 126000, loss = 2.26309 +I0112 12:56:00.969315 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 12:56:00.969341 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 12:56:00.969353 12987 solver.cpp:244] Train net output #2: prob = 1.96184 (* 1 = 1.96184 loss) +I0112 12:56:01.223911 12987 sgd_solver.cpp:106] Iteration 126000, lr = 0.1 +I0112 13:04:47.213049 12987 solver.cpp:228] Iteration 127000, loss = 2.25626 +I0112 13:04:47.213243 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0112 13:04:47.213260 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 13:04:47.213270 12987 solver.cpp:244] Train net output #2: prob = 1.59388 (* 1 = 1.59388 loss) +I0112 13:04:47.463850 12987 sgd_solver.cpp:106] Iteration 127000, lr = 0.1 +I0112 13:13:33.709070 12987 solver.cpp:228] Iteration 128000, loss = 2.26527 +I0112 13:13:33.709324 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 13:13:33.709370 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.5625 +I0112 13:13:33.709391 12987 solver.cpp:244] Train net output #2: prob = 2.67435 (* 1 = 2.67435 loss) +I0112 13:13:33.963237 12987 sgd_solver.cpp:106] Iteration 128000, lr = 0.1 +I0112 13:22:19.925849 12987 solver.cpp:228] Iteration 129000, loss = 2.26478 +I0112 13:22:19.926108 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0112 13:22:19.926147 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 13:22:19.926170 12987 solver.cpp:244] Train net output #2: prob = 1.55423 (* 1 = 1.55423 loss) +I0112 13:22:20.181115 12987 sgd_solver.cpp:106] Iteration 129000, lr = 0.1 +I0112 13:31:06.189553 12987 solver.cpp:228] Iteration 130000, loss = 2.25574 +I0112 13:31:06.189821 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 13:31:06.189863 12987 solver.cpp:244] Train net output #1: accuracy@5 
= 0.84375 +I0112 13:31:06.189883 12987 solver.cpp:244] Train net output #2: prob = 2.03542 (* 1 = 2.03542 loss) +I0112 13:31:06.433465 12987 sgd_solver.cpp:106] Iteration 130000, lr = 0.1 +I0112 13:39:52.279276 12987 solver.cpp:228] Iteration 131000, loss = 2.25087 +I0112 13:39:52.279486 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 13:39:52.279516 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 13:39:52.279537 12987 solver.cpp:244] Train net output #2: prob = 2.02933 (* 1 = 2.02933 loss) +I0112 13:39:52.532995 12987 sgd_solver.cpp:106] Iteration 131000, lr = 0.1 +I0112 13:48:38.690311 12987 solver.cpp:228] Iteration 132000, loss = 2.24575 +I0112 13:48:38.690515 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 13:48:38.690526 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 13:48:38.690536 12987 solver.cpp:244] Train net output #2: prob = 2.17591 (* 1 = 2.17591 loss) +I0112 13:48:38.944998 12987 sgd_solver.cpp:106] Iteration 132000, lr = 0.1 +I0112 13:57:25.779604 12987 solver.cpp:228] Iteration 133000, loss = 2.25187 +I0112 13:57:25.779803 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 13:57:25.779815 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 13:57:25.779825 12987 solver.cpp:244] Train net output #2: prob = 2.28928 (* 1 = 2.28928 loss) +I0112 13:57:26.035521 12987 sgd_solver.cpp:106] Iteration 133000, lr = 0.1 +I0112 14:06:12.720248 12987 solver.cpp:228] Iteration 134000, loss = 2.24671 +I0112 14:06:12.720459 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 14:06:12.720471 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 14:06:12.720481 12987 solver.cpp:244] Train net output #2: prob = 2.15149 (* 1 = 2.15149 loss) +I0112 14:06:12.975373 12987 sgd_solver.cpp:106] Iteration 134000, lr = 0.1 +I0112 14:14:59.594756 12987 solver.cpp:228] Iteration 135000, loss = 2.25656 +I0112 14:14:59.595042 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 14:14:59.595082 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 14:14:59.595100 12987 solver.cpp:244] Train net output #2: prob = 2.11033 (* 1 = 2.11033 loss) +I0112 14:14:59.852357 12987 sgd_solver.cpp:106] Iteration 135000, lr = 0.1 +I0112 14:23:46.686883 12987 solver.cpp:228] Iteration 136000, loss = 2.25335 +I0112 14:23:46.687109 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 14:23:46.687142 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 14:23:46.687160 12987 solver.cpp:244] Train net output #2: prob = 2.34812 (* 1 = 2.34812 loss) +I0112 14:23:46.943317 12987 sgd_solver.cpp:106] Iteration 136000, lr = 0.1 +I0112 14:32:33.617111 12987 solver.cpp:228] Iteration 137000, loss = 2.25481 +I0112 14:32:33.617368 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 14:32:33.617410 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 14:32:33.617429 12987 solver.cpp:244] Train net output #2: prob = 2.41579 (* 1 = 2.41579 loss) +I0112 14:32:33.872390 12987 sgd_solver.cpp:106] Iteration 137000, lr = 0.1 +I0112 14:41:20.026636 12987 solver.cpp:228] Iteration 138000, loss = 2.25839 +I0112 14:41:20.027187 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 14:41:20.027199 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 14:41:20.027209 12987 solver.cpp:244] Train net output #2: prob = 2.42108 (* 1 = 2.42108 loss) +I0112 
14:41:20.280032 12987 sgd_solver.cpp:106] Iteration 138000, lr = 0.1 +I0112 14:50:07.019567 12987 solver.cpp:228] Iteration 139000, loss = 2.25952 +I0112 14:50:07.020932 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 14:50:07.020944 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 14:50:07.020956 12987 solver.cpp:244] Train net output #2: prob = 1.64353 (* 1 = 1.64353 loss) +I0112 14:50:07.272058 12987 sgd_solver.cpp:106] Iteration 139000, lr = 0.1 +I0112 14:58:53.985810 12987 solver.cpp:228] Iteration 140000, loss = 2.24671 +I0112 14:58:53.986071 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 14:58:53.986110 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 14:58:53.986133 12987 solver.cpp:244] Train net output #2: prob = 1.82928 (* 1 = 1.82928 loss) +I0112 14:58:54.240671 12987 sgd_solver.cpp:106] Iteration 140000, lr = 0.1 +I0112 15:07:40.849030 12987 solver.cpp:228] Iteration 141000, loss = 2.23928 +I0112 15:07:40.849283 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 15:07:40.849324 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 15:07:40.849344 12987 solver.cpp:244] Train net output #2: prob = 1.63814 (* 1 = 1.63814 loss) +I0112 15:07:41.102998 12987 sgd_solver.cpp:106] Iteration 141000, lr = 0.1 +I0112 15:16:27.301677 12987 solver.cpp:228] Iteration 142000, loss = 2.24414 +I0112 15:16:27.301882 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 15:16:27.301893 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 15:16:27.301904 12987 solver.cpp:244] Train net output #2: prob = 1.95938 (* 1 = 1.95938 loss) +I0112 15:16:27.545691 12987 sgd_solver.cpp:106] Iteration 142000, lr = 0.1 +I0112 15:25:13.881003 12987 solver.cpp:228] Iteration 143000, loss = 2.25347 +I0112 15:25:13.881191 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 15:25:13.881206 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 15:25:13.881220 12987 solver.cpp:244] Train net output #2: prob = 1.66744 (* 1 = 1.66744 loss) +I0112 15:25:14.137022 12987 sgd_solver.cpp:106] Iteration 143000, lr = 0.1 +I0112 15:34:00.722082 12987 solver.cpp:228] Iteration 144000, loss = 2.26032 +I0112 15:34:00.722327 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 15:34:00.722358 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 15:34:00.722369 12987 solver.cpp:244] Train net output #2: prob = 2.01127 (* 1 = 2.01127 loss) +I0112 15:34:00.978003 12987 sgd_solver.cpp:106] Iteration 144000, lr = 0.1 +I0112 15:42:47.334198 12987 solver.cpp:228] Iteration 145000, loss = 2.26895 +I0112 15:42:47.334447 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 15:42:47.334475 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 15:42:47.334493 12987 solver.cpp:244] Train net output #2: prob = 2.64196 (* 1 = 2.64196 loss) +I0112 15:42:47.586117 12987 sgd_solver.cpp:106] Iteration 145000, lr = 0.1 +I0112 15:51:33.619643 12987 solver.cpp:228] Iteration 146000, loss = 2.2422 +I0112 15:51:33.619865 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 15:51:33.619897 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 15:51:33.619916 12987 solver.cpp:244] Train net output #2: prob = 1.77736 (* 1 = 1.77736 loss) +I0112 15:51:33.871471 12987 sgd_solver.cpp:106] Iteration 146000, lr = 0.1 +I0112 16:00:19.821030 12987 solver.cpp:228] 
Iteration 147000, loss = 2.23613 +I0112 16:00:19.821290 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 16:00:19.821329 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 16:00:19.821347 12987 solver.cpp:244] Train net output #2: prob = 2.0649 (* 1 = 2.0649 loss) +I0112 16:00:20.074436 12987 sgd_solver.cpp:106] Iteration 147000, lr = 0.1 +I0112 16:09:06.727931 12987 solver.cpp:228] Iteration 148000, loss = 2.23709 +I0112 16:09:06.728185 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 16:09:06.728245 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 16:09:06.728261 12987 solver.cpp:244] Train net output #2: prob = 2.41549 (* 1 = 2.41549 loss) +I0112 16:09:06.973768 12987 sgd_solver.cpp:106] Iteration 148000, lr = 0.1 +I0112 16:17:53.859812 12987 solver.cpp:228] Iteration 149000, loss = 2.25737 +I0112 16:17:53.860075 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0112 16:17:53.860116 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 16:17:53.860134 12987 solver.cpp:244] Train net output #2: prob = 1.90046 (* 1 = 1.90046 loss) +I0112 16:17:54.114064 12987 sgd_solver.cpp:106] Iteration 149000, lr = 0.1 +I0112 16:26:40.434564 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_150000.caffemodel +I0112 16:26:42.009963 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_150000.solverstate +I0112 16:26:42.423980 12987 solver.cpp:228] Iteration 150000, loss = 2.24579 +I0112 16:26:42.424105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0112 16:26:42.424129 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 16:26:42.424147 12987 solver.cpp:244] Train net output #2: prob = 1.59354 (* 1 = 1.59354 loss) +I0112 16:26:42.669497 12987 sgd_solver.cpp:106] Iteration 150000, lr = 0.1 +I0112 16:35:29.112756 12987 solver.cpp:228] Iteration 151000, loss = 2.24759 +I0112 16:35:29.112993 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 16:35:29.113028 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 16:35:29.113046 12987 solver.cpp:244] Train net output #2: prob = 2.65051 (* 1 = 2.65051 loss) +I0112 16:35:29.368057 12987 sgd_solver.cpp:106] Iteration 151000, lr = 0.1 +I0112 16:44:16.414520 12987 solver.cpp:228] Iteration 152000, loss = 2.25488 +I0112 16:44:16.414724 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 16:44:16.414739 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 16:44:16.414752 12987 solver.cpp:244] Train net output #2: prob = 1.82778 (* 1 = 1.82778 loss) +I0112 16:44:16.668269 12987 sgd_solver.cpp:106] Iteration 152000, lr = 0.1 +I0112 16:53:03.220856 12987 solver.cpp:228] Iteration 153000, loss = 2.25797 +I0112 16:53:03.221071 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 16:53:03.221097 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 16:53:03.221108 12987 solver.cpp:244] Train net output #2: prob = 1.59689 (* 1 = 1.59689 loss) +I0112 16:53:03.475852 12987 sgd_solver.cpp:106] Iteration 153000, lr = 0.1 +I0112 17:01:50.105157 12987 solver.cpp:228] Iteration 154000, loss = 2.26042 +I0112 17:01:50.105371 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.34375 +I0112 17:01:50.105399 12987 solver.cpp:244] Train net output #1: 
accuracy@5 = 0.65625 +I0112 17:01:50.105412 12987 solver.cpp:244] Train net output #2: prob = 2.89804 (* 1 = 2.89804 loss) +I0112 17:01:50.363112 12987 sgd_solver.cpp:106] Iteration 154000, lr = 0.1 +I0112 17:10:36.710120 12987 solver.cpp:228] Iteration 155000, loss = 2.25821 +I0112 17:10:36.710389 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0112 17:10:36.710409 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 17:10:36.710427 12987 solver.cpp:244] Train net output #2: prob = 1.70301 (* 1 = 1.70301 loss) +I0112 17:10:36.960276 12987 sgd_solver.cpp:106] Iteration 155000, lr = 0.1 +I0112 17:19:23.370117 12987 solver.cpp:228] Iteration 156000, loss = 2.24687 +I0112 17:19:23.370324 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 17:19:23.370337 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 17:19:23.370347 12987 solver.cpp:244] Train net output #2: prob = 2.30844 (* 1 = 2.30844 loss) +I0112 17:19:23.626379 12987 sgd_solver.cpp:106] Iteration 156000, lr = 0.1 +I0112 17:28:10.073850 12987 solver.cpp:228] Iteration 157000, loss = 2.23971 +I0112 17:28:10.074174 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 17:28:10.074220 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0112 17:28:10.074239 12987 solver.cpp:244] Train net output #2: prob = 3.17533 (* 1 = 3.17533 loss) +I0112 17:28:10.325618 12987 sgd_solver.cpp:106] Iteration 157000, lr = 0.1 +I0112 17:32:20.254119 12987 blocking_queue.cpp:50] Data layer prefetch queue empty +I0112 17:36:58.423919 12987 solver.cpp:228] Iteration 158000, loss = 2.24138 +I0112 17:36:58.424201 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 17:36:58.424221 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 17:36:58.424238 12987 solver.cpp:244] Train net output #2: prob = 2.42489 (* 1 = 2.42489 loss) +I0112 17:36:58.679448 12987 sgd_solver.cpp:106] Iteration 158000, lr = 0.1 +I0112 17:45:45.151669 12987 solver.cpp:228] Iteration 159000, loss = 2.262 +I0112 17:45:45.151931 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 17:45:45.151968 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 17:45:45.151989 12987 solver.cpp:244] Train net output #2: prob = 2.18752 (* 1 = 2.18752 loss) +I0112 17:45:45.406786 12987 sgd_solver.cpp:106] Iteration 159000, lr = 0.1 +I0112 17:54:31.609122 12987 solver.cpp:228] Iteration 160000, loss = 2.23921 +I0112 17:54:31.609391 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 17:54:31.609423 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 17:54:31.609434 12987 solver.cpp:244] Train net output #2: prob = 1.6948 (* 1 = 1.6948 loss) +I0112 17:54:31.863894 12987 sgd_solver.cpp:106] Iteration 160000, lr = 0.1 +I0112 18:03:18.097326 12987 solver.cpp:228] Iteration 161000, loss = 2.23069 +I0112 18:03:18.097581 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 18:03:18.097622 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 18:03:18.097642 12987 solver.cpp:244] Train net output #2: prob = 1.90311 (* 1 = 1.90311 loss) +I0112 18:03:18.351641 12987 sgd_solver.cpp:106] Iteration 161000, lr = 0.1 +I0112 18:12:05.425719 12987 solver.cpp:228] Iteration 162000, loss = 2.22075 +I0112 18:12:05.425971 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 18:12:05.426005 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 18:12:05.426031 12987 
solver.cpp:244] Train net output #2: prob = 1.57024 (* 1 = 1.57024 loss) +I0112 18:12:05.680507 12987 sgd_solver.cpp:106] Iteration 162000, lr = 0.1 +I0112 18:20:52.603303 12987 solver.cpp:228] Iteration 163000, loss = 2.23788 +I0112 18:20:52.603642 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 18:20:52.603688 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 18:20:52.603713 12987 solver.cpp:244] Train net output #2: prob = 1.9525 (* 1 = 1.9525 loss) +I0112 18:20:52.856578 12987 sgd_solver.cpp:106] Iteration 163000, lr = 0.1 +I0112 18:29:39.401543 12987 solver.cpp:228] Iteration 164000, loss = 2.25076 +I0112 18:29:39.401782 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 18:29:39.401821 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 18:29:39.401865 12987 solver.cpp:244] Train net output #2: prob = 1.93902 (* 1 = 1.93902 loss) +I0112 18:29:39.649955 12987 sgd_solver.cpp:106] Iteration 164000, lr = 0.1 +I0112 18:38:26.364612 12987 solver.cpp:228] Iteration 165000, loss = 2.21224 +I0112 18:38:26.364913 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 18:38:26.364956 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 18:38:26.365725 12987 solver.cpp:244] Train net output #2: prob = 2.75829 (* 1 = 2.75829 loss) +I0112 18:38:26.619012 12987 sgd_solver.cpp:106] Iteration 165000, lr = 0.1 +I0112 18:47:13.526703 12987 solver.cpp:228] Iteration 166000, loss = 2.24084 +I0112 18:47:13.526882 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 18:47:13.526893 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.59375 +I0112 18:47:13.526904 12987 solver.cpp:244] Train net output #2: prob = 3.16969 (* 1 = 3.16969 loss) +I0112 18:47:13.780925 12987 sgd_solver.cpp:106] Iteration 166000, lr = 0.1 +I0112 18:56:00.508070 12987 solver.cpp:228] Iteration 167000, loss = 2.23438 +I0112 18:56:00.508311 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 18:56:00.508345 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 18:56:00.508363 12987 solver.cpp:244] Train net output #2: prob = 2.02317 (* 1 = 2.02317 loss) +I0112 18:56:00.761546 12987 sgd_solver.cpp:106] Iteration 167000, lr = 0.1 +I0112 19:04:47.469717 12987 solver.cpp:228] Iteration 168000, loss = 2.23265 +I0112 19:04:47.469987 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.3125 +I0112 19:04:47.470021 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 19:04:47.470041 12987 solver.cpp:244] Train net output #2: prob = 2.65999 (* 1 = 2.65999 loss) +I0112 19:04:47.726712 12987 sgd_solver.cpp:106] Iteration 168000, lr = 0.1 +I0112 19:13:34.570389 12987 solver.cpp:228] Iteration 169000, loss = 2.23872 +I0112 19:13:34.570664 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.28125 +I0112 19:13:34.570699 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 19:13:34.570724 12987 solver.cpp:244] Train net output #2: prob = 2.51616 (* 1 = 2.51616 loss) +I0112 19:13:34.827081 12987 sgd_solver.cpp:106] Iteration 169000, lr = 0.1 +I0112 19:22:21.344687 12987 solver.cpp:228] Iteration 170000, loss = 2.23729 +I0112 19:22:21.344933 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 19:22:21.344971 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 19:22:21.344996 12987 solver.cpp:244] Train net output #2: prob = 2.09772 (* 1 = 2.09772 loss) +I0112 19:22:21.600188 12987 sgd_solver.cpp:106] Iteration 
170000, lr = 0.1 +I0112 19:31:08.131498 12987 solver.cpp:228] Iteration 171000, loss = 2.21785 +I0112 19:31:08.131727 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 19:31:08.131765 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 19:31:08.131791 12987 solver.cpp:244] Train net output #2: prob = 2.32285 (* 1 = 2.32285 loss) +I0112 19:31:08.385099 12987 sgd_solver.cpp:106] Iteration 171000, lr = 0.1 +I0112 19:39:55.116266 12987 solver.cpp:228] Iteration 172000, loss = 2.21121 +I0112 19:39:55.116536 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 19:39:55.116578 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0112 19:39:55.116601 12987 solver.cpp:244] Train net output #2: prob = 1.71931 (* 1 = 1.71931 loss) +I0112 19:39:55.371865 12987 sgd_solver.cpp:106] Iteration 172000, lr = 0.1 +I0112 19:48:41.766317 12987 solver.cpp:228] Iteration 173000, loss = 2.23112 +I0112 19:48:41.766558 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 19:48:41.766574 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 19:48:41.766589 12987 solver.cpp:244] Train net output #2: prob = 2.06887 (* 1 = 2.06887 loss) +I0112 19:48:42.022305 12987 sgd_solver.cpp:106] Iteration 173000, lr = 0.1 +I0112 19:57:28.595808 12987 solver.cpp:228] Iteration 174000, loss = 2.22304 +I0112 19:57:28.596082 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 19:57:28.596119 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 19:57:28.596149 12987 solver.cpp:244] Train net output #2: prob = 2.17473 (* 1 = 2.17473 loss) +I0112 19:57:28.840057 12987 sgd_solver.cpp:106] Iteration 174000, lr = 0.1 +I0112 20:06:15.919075 12987 solver.cpp:228] Iteration 175000, loss = 2.2269 +I0112 20:06:15.919275 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 20:06:15.919286 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 20:06:15.919297 12987 solver.cpp:244] Train net output #2: prob = 2.32127 (* 1 = 2.32127 loss) +I0112 20:06:16.174266 12987 sgd_solver.cpp:106] Iteration 175000, lr = 0.1 +I0112 20:15:03.022523 12987 solver.cpp:228] Iteration 176000, loss = 2.22206 +I0112 20:15:03.022789 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 20:15:03.022821 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 20:15:03.022845 12987 solver.cpp:244] Train net output #2: prob = 2.64561 (* 1 = 2.64561 loss) +I0112 20:15:03.277752 12987 sgd_solver.cpp:106] Iteration 176000, lr = 0.1 +I0112 20:23:50.004133 12987 solver.cpp:228] Iteration 177000, loss = 2.22569 +I0112 20:23:50.004335 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 20:23:50.004348 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 20:23:50.004359 12987 solver.cpp:244] Train net output #2: prob = 2.64515 (* 1 = 2.64515 loss) +I0112 20:23:50.258927 12987 sgd_solver.cpp:106] Iteration 177000, lr = 0.1 +I0112 20:32:36.717548 12987 solver.cpp:228] Iteration 178000, loss = 2.22535 +I0112 20:32:36.717799 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 20:32:36.717839 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 20:32:36.717860 12987 solver.cpp:244] Train net output #2: prob = 2.48002 (* 1 = 2.48002 loss) +I0112 20:32:36.973338 12987 sgd_solver.cpp:106] Iteration 178000, lr = 0.1 +I0112 20:41:23.808528 12987 solver.cpp:228] Iteration 179000, loss = 2.22694 +I0112 20:41:23.808781 12987 
solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 20:41:23.808794 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 20:41:23.808806 12987 solver.cpp:244] Train net output #2: prob = 1.48403 (* 1 = 1.48403 loss) +I0112 20:41:24.064630 12987 sgd_solver.cpp:106] Iteration 179000, lr = 0.1 +I0112 20:50:11.241657 12987 solver.cpp:228] Iteration 180000, loss = 2.23273 +I0112 20:50:11.241822 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0112 20:50:11.241832 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 20:50:11.241842 12987 solver.cpp:244] Train net output #2: prob = 2.10911 (* 1 = 2.10911 loss) +I0112 20:50:11.495096 12987 sgd_solver.cpp:106] Iteration 180000, lr = 0.1 +I0112 20:58:58.343050 12987 solver.cpp:228] Iteration 181000, loss = 2.22624 +I0112 20:58:58.343236 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 20:58:58.343246 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 20:58:58.343256 12987 solver.cpp:244] Train net output #2: prob = 2.71637 (* 1 = 2.71637 loss) +I0112 20:58:58.597745 12987 sgd_solver.cpp:106] Iteration 181000, lr = 0.1 +I0112 21:07:45.377406 12987 solver.cpp:228] Iteration 182000, loss = 2.21254 +I0112 21:07:45.377624 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 21:07:45.377655 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 21:07:45.377673 12987 solver.cpp:244] Train net output #2: prob = 2.5992 (* 1 = 2.5992 loss) +I0112 21:07:45.635169 12987 sgd_solver.cpp:106] Iteration 182000, lr = 0.1 +I0112 21:16:31.925580 12987 solver.cpp:228] Iteration 183000, loss = 2.22148 +I0112 21:16:31.925794 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 21:16:31.925820 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 21:16:31.925834 12987 solver.cpp:244] Train net output #2: prob = 2.49552 (* 1 = 2.49552 loss) +I0112 21:16:32.174618 12987 sgd_solver.cpp:106] Iteration 183000, lr = 0.1 +I0112 21:25:19.239408 12987 solver.cpp:228] Iteration 184000, loss = 2.22487 +I0112 21:25:19.239634 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 21:25:19.239646 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 21:25:19.239660 12987 solver.cpp:244] Train net output #2: prob = 2.26215 (* 1 = 2.26215 loss) +I0112 21:25:19.492418 12987 sgd_solver.cpp:106] Iteration 184000, lr = 0.1 +I0112 21:34:06.029064 12987 solver.cpp:228] Iteration 185000, loss = 2.24241 +I0112 21:34:06.029237 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 21:34:06.029250 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 21:34:06.029260 12987 solver.cpp:244] Train net output #2: prob = 1.99023 (* 1 = 1.99023 loss) +I0112 21:34:06.282366 12987 sgd_solver.cpp:106] Iteration 185000, lr = 0.1 +I0112 21:42:53.012181 12987 solver.cpp:228] Iteration 186000, loss = 2.21918 +I0112 21:42:53.012406 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 21:42:53.012419 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 21:42:53.012431 12987 solver.cpp:244] Train net output #2: prob = 2.25155 (* 1 = 2.25155 loss) +I0112 21:42:53.268012 12987 sgd_solver.cpp:106] Iteration 186000, lr = 0.1 +I0112 21:51:40.109216 12987 solver.cpp:228] Iteration 187000, loss = 2.2126 +I0112 21:51:40.110075 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 21:51:40.110100 12987 solver.cpp:244] Train net output #1: 
accuracy@5 = 0.84375 +I0112 21:51:40.110118 12987 solver.cpp:244] Train net output #2: prob = 1.70565 (* 1 = 1.70565 loss) +I0112 21:51:40.358079 12987 sgd_solver.cpp:106] Iteration 187000, lr = 0.1 +I0112 22:00:26.816074 12987 solver.cpp:228] Iteration 188000, loss = 2.22652 +I0112 22:00:26.816295 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.375 +I0112 22:00:26.816324 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 22:00:26.816337 12987 solver.cpp:244] Train net output #2: prob = 2.61961 (* 1 = 2.61961 loss) +I0112 22:00:27.068269 12987 sgd_solver.cpp:106] Iteration 188000, lr = 0.1 +I0112 22:09:13.479290 12987 solver.cpp:228] Iteration 189000, loss = 2.23272 +I0112 22:09:13.479487 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 22:09:13.479503 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 22:09:13.479514 12987 solver.cpp:244] Train net output #2: prob = 2.30275 (* 1 = 2.30275 loss) +I0112 22:09:13.734151 12987 sgd_solver.cpp:106] Iteration 189000, lr = 0.1 +I0112 22:18:00.175112 12987 solver.cpp:228] Iteration 190000, loss = 2.24449 +I0112 22:18:00.175310 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.4375 +I0112 22:18:00.175325 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.625 +I0112 22:18:00.175340 12987 solver.cpp:244] Train net output #2: prob = 2.41891 (* 1 = 2.41891 loss) +I0112 22:18:00.428985 12987 sgd_solver.cpp:106] Iteration 190000, lr = 0.1 +I0112 22:26:47.064260 12987 solver.cpp:228] Iteration 191000, loss = 2.22347 +I0112 22:26:47.064522 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 22:26:47.064559 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0112 22:26:47.064579 12987 solver.cpp:244] Train net output #2: prob = 2.33023 (* 1 = 2.33023 loss) +I0112 22:26:47.316308 12987 sgd_solver.cpp:106] Iteration 191000, lr = 0.1 +I0112 22:35:33.897855 12987 solver.cpp:228] Iteration 192000, loss = 2.22599 +I0112 22:35:33.898119 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 22:35:33.898156 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 22:35:33.898177 12987 solver.cpp:244] Train net output #2: prob = 1.7642 (* 1 = 1.7642 loss) +I0112 22:35:34.146144 12987 sgd_solver.cpp:106] Iteration 192000, lr = 0.1 +I0112 22:44:20.929759 12987 solver.cpp:228] Iteration 193000, loss = 2.23788 +I0112 22:44:20.929988 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 22:44:20.930027 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 22:44:20.930049 12987 solver.cpp:244] Train net output #2: prob = 2.69118 (* 1 = 2.69118 loss) +I0112 22:44:21.185017 12987 sgd_solver.cpp:106] Iteration 193000, lr = 0.1 +I0112 22:53:07.657626 12987 solver.cpp:228] Iteration 194000, loss = 2.24469 +I0112 22:53:07.657826 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0112 22:53:07.657842 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 22:53:07.657853 12987 solver.cpp:244] Train net output #2: prob = 2.10772 (* 1 = 2.10772 loss) +I0112 22:53:07.908141 12987 sgd_solver.cpp:106] Iteration 194000, lr = 0.1 +I0112 23:01:54.364357 12987 solver.cpp:228] Iteration 195000, loss = 2.24796 +I0112 23:01:54.364562 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0112 23:01:54.364573 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0112 23:01:54.364583 12987 solver.cpp:244] Train net output #2: prob = 1.53431 (* 1 = 1.53431 loss) +I0112 
23:01:54.619704 12987 sgd_solver.cpp:106] Iteration 195000, lr = 0.1 +I0112 23:10:40.694676 12987 solver.cpp:228] Iteration 196000, loss = 2.23263 +I0112 23:10:40.694922 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0112 23:10:40.694960 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 23:10:40.694986 12987 solver.cpp:244] Train net output #2: prob = 2.6705 (* 1 = 2.6705 loss) +I0112 23:10:40.948436 12987 sgd_solver.cpp:106] Iteration 196000, lr = 0.1 +I0112 23:19:27.181155 12987 solver.cpp:228] Iteration 197000, loss = 2.22751 +I0112 23:19:27.181411 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0112 23:19:27.181439 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0112 23:19:27.181458 12987 solver.cpp:244] Train net output #2: prob = 1.67056 (* 1 = 1.67056 loss) +I0112 23:19:27.436877 12987 sgd_solver.cpp:106] Iteration 197000, lr = 0.1 +I0112 23:28:13.955291 12987 solver.cpp:228] Iteration 198000, loss = 2.22621 +I0112 23:28:13.955528 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0112 23:28:13.955554 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0112 23:28:13.955566 12987 solver.cpp:244] Train net output #2: prob = 1.81598 (* 1 = 1.81598 loss) +I0112 23:28:14.209297 12987 sgd_solver.cpp:106] Iteration 198000, lr = 0.1 +I0112 23:37:00.579843 12987 solver.cpp:228] Iteration 199000, loss = 2.23285 +I0112 23:37:00.580171 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 23:37:00.580211 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.6875 +I0112 23:37:00.580238 12987 solver.cpp:244] Train net output #2: prob = 2.52206 (* 1 = 2.52206 loss) +I0112 23:37:00.830571 12987 sgd_solver.cpp:106] Iteration 199000, lr = 0.1 +I0112 23:45:47.151382 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_200000.caffemodel +I0112 23:45:48.174933 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_200000.solverstate +I0112 23:45:48.609079 12987 solver.cpp:228] Iteration 200000, loss = 2.22318 +I0112 23:45:48.609122 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0112 23:45:48.609129 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0112 23:45:48.609136 12987 solver.cpp:244] Train net output #2: prob = 2.76963 (* 1 = 2.76963 loss) +I0112 23:45:48.852538 12987 sgd_solver.cpp:106] Iteration 200000, lr = 0.01 +I0112 23:54:35.215541 12987 solver.cpp:228] Iteration 201000, loss = 1.81441 +I0112 23:54:35.215813 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0112 23:54:35.215854 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0112 23:54:35.215880 12987 solver.cpp:244] Train net output #2: prob = 1.96793 (* 1 = 1.96793 loss) +I0112 23:54:35.472406 12987 sgd_solver.cpp:106] Iteration 201000, lr = 0.01 +I0113 00:03:22.089547 12987 solver.cpp:228] Iteration 202000, loss = 1.64136 +I0113 00:03:22.089788 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 00:03:22.089803 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 00:03:22.089818 12987 solver.cpp:244] Train net output #2: prob = 1.49606 (* 1 = 1.49606 loss) +I0113 00:03:22.341858 12987 sgd_solver.cpp:106] Iteration 202000, lr = 0.01 +I0113 00:12:08.837126 12987 solver.cpp:228] Iteration 203000, loss = 1.60189 +I0113 00:12:08.837313 12987 solver.cpp:244] 
Train net output #0: accuracy@1 = 0.46875 +I0113 00:12:08.837342 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0113 00:12:08.837353 12987 solver.cpp:244] Train net output #2: prob = 1.89173 (* 1 = 1.89173 loss) +I0113 00:12:09.086400 12987 sgd_solver.cpp:106] Iteration 203000, lr = 0.01 +I0113 00:20:55.212898 12987 solver.cpp:228] Iteration 204000, loss = 1.56743 +I0113 00:20:55.213109 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 00:20:55.213122 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 00:20:55.213135 12987 solver.cpp:244] Train net output #2: prob = 1.47127 (* 1 = 1.47127 loss) +I0113 00:20:55.466387 12987 sgd_solver.cpp:106] Iteration 204000, lr = 0.01 +I0113 00:29:42.133960 12987 solver.cpp:228] Iteration 205000, loss = 1.51792 +I0113 00:29:42.134183 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 00:29:42.134209 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 00:29:42.134227 12987 solver.cpp:244] Train net output #2: prob = 1.62584 (* 1 = 1.62584 loss) +I0113 00:29:42.383626 12987 sgd_solver.cpp:106] Iteration 205000, lr = 0.01 +I0113 00:38:28.776542 12987 solver.cpp:228] Iteration 206000, loss = 1.47259 +I0113 00:38:28.776779 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0113 00:38:28.776818 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 00:38:28.776837 12987 solver.cpp:244] Train net output #2: prob = 0.817923 (* 1 = 0.817923 loss) +I0113 00:38:29.029305 12987 sgd_solver.cpp:106] Iteration 206000, lr = 0.01 +I0113 00:47:15.267413 12987 solver.cpp:228] Iteration 207000, loss = 1.43707 +I0113 00:47:15.267616 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 00:47:15.267627 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 00:47:15.267638 12987 solver.cpp:244] Train net output #2: prob = 0.85557 (* 1 = 0.85557 loss) +I0113 00:47:15.519078 12987 sgd_solver.cpp:106] Iteration 207000, lr = 0.01 +I0113 00:56:01.497581 12987 solver.cpp:228] Iteration 208000, loss = 1.39063 +I0113 00:56:01.497778 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 00:56:01.497818 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 00:56:01.497843 12987 solver.cpp:244] Train net output #2: prob = 1.34547 (* 1 = 1.34547 loss) +I0113 00:56:01.750046 12987 sgd_solver.cpp:106] Iteration 208000, lr = 0.01 +I0113 01:04:47.771159 12987 solver.cpp:228] Iteration 209000, loss = 1.32543 +I0113 01:04:47.771423 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 01:04:47.771461 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 01:04:47.771486 12987 solver.cpp:244] Train net output #2: prob = 1.34541 (* 1 = 1.34541 loss) +I0113 01:04:48.017604 12987 sgd_solver.cpp:106] Iteration 209000, lr = 0.01 +I0113 01:13:34.121582 12987 solver.cpp:228] Iteration 210000, loss = 1.23869 +I0113 01:13:34.121840 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 01:13:34.121881 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 01:13:34.121901 12987 solver.cpp:244] Train net output #2: prob = 1.10081 (* 1 = 1.10081 loss) +I0113 01:13:34.372886 12987 sgd_solver.cpp:106] Iteration 210000, lr = 0.01 +I0113 01:22:20.412720 12987 solver.cpp:228] Iteration 211000, loss = 1.43567 +I0113 01:22:20.412941 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0113 01:22:20.412955 12987 solver.cpp:244] Train net output #1: 
accuracy@5 = 0.875 +I0113 01:22:20.412968 12987 solver.cpp:244] Train net output #2: prob = 1.02423 (* 1 = 1.02423 loss) +I0113 01:22:20.665343 12987 sgd_solver.cpp:106] Iteration 211000, lr = 0.01 +I0113 01:31:07.032194 12987 solver.cpp:228] Iteration 212000, loss = 1.40918 +I0113 01:31:07.032413 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 01:31:07.032447 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 01:31:07.032466 12987 solver.cpp:244] Train net output #2: prob = 1.57452 (* 1 = 1.57452 loss) +I0113 01:31:07.286151 12987 sgd_solver.cpp:106] Iteration 212000, lr = 0.01 +I0113 01:39:53.469746 12987 solver.cpp:228] Iteration 213000, loss = 1.40367 +I0113 01:39:53.469946 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 01:39:53.469960 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 01:39:53.469974 12987 solver.cpp:244] Train net output #2: prob = 1.68598 (* 1 = 1.68598 loss) +I0113 01:39:53.726465 12987 sgd_solver.cpp:106] Iteration 213000, lr = 0.01 +I0113 01:48:39.644821 12987 solver.cpp:228] Iteration 214000, loss = 1.39045 +I0113 01:48:39.645061 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0113 01:48:39.645104 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 01:48:39.645131 12987 solver.cpp:244] Train net output #2: prob = 1.62549 (* 1 = 1.62549 loss) +I0113 01:48:39.894712 12987 sgd_solver.cpp:106] Iteration 214000, lr = 0.01 +I0113 01:57:26.076158 12987 solver.cpp:228] Iteration 215000, loss = 1.37774 +I0113 01:57:26.076360 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 01:57:26.076388 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 01:57:26.076401 12987 solver.cpp:244] Train net output #2: prob = 1.53451 (* 1 = 1.53451 loss) +I0113 01:57:26.326795 12987 sgd_solver.cpp:106] Iteration 215000, lr = 0.01 +I0113 02:06:12.428318 12987 solver.cpp:228] Iteration 216000, loss = 1.35392 +I0113 02:06:12.428589 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 02:06:12.428617 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 02:06:12.428633 12987 solver.cpp:244] Train net output #2: prob = 1.66692 (* 1 = 1.66692 loss) +I0113 02:06:12.680567 12987 sgd_solver.cpp:106] Iteration 216000, lr = 0.01 +I0113 02:14:58.928077 12987 solver.cpp:228] Iteration 217000, loss = 1.32665 +I0113 02:14:58.928294 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 02:14:58.928324 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 02:14:58.928340 12987 solver.cpp:244] Train net output #2: prob = 1.59927 (* 1 = 1.59927 loss) +I0113 02:14:59.181658 12987 sgd_solver.cpp:106] Iteration 217000, lr = 0.01 +I0113 02:23:45.161154 12987 solver.cpp:228] Iteration 218000, loss = 1.29828 +I0113 02:23:45.161460 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 02:23:45.161502 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 02:23:45.161530 12987 solver.cpp:244] Train net output #2: prob = 1.24164 (* 1 = 1.24164 loss) +I0113 02:23:45.413794 12987 sgd_solver.cpp:106] Iteration 218000, lr = 0.01 +I0113 02:32:31.955245 12987 solver.cpp:228] Iteration 219000, loss = 1.24661 +I0113 02:32:31.955441 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 02:32:31.955452 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 02:32:31.955464 12987 solver.cpp:244] Train net output #2: prob = 0.953608 (* 1 = 0.953608 loss) 
+I0113 02:32:32.209956 12987 sgd_solver.cpp:106] Iteration 219000, lr = 0.01 +I0113 02:41:18.606031 12987 solver.cpp:228] Iteration 220000, loss = 1.19983 +I0113 02:41:18.606346 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0113 02:41:18.606395 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 02:41:18.606422 12987 solver.cpp:244] Train net output #2: prob = 0.665044 (* 1 = 0.665044 loss) +I0113 02:41:18.863809 12987 sgd_solver.cpp:106] Iteration 220000, lr = 0.01 +I0113 02:50:05.460284 12987 solver.cpp:228] Iteration 221000, loss = 1.35877 +I0113 02:50:05.460582 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 02:50:05.460628 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 02:50:05.460654 12987 solver.cpp:244] Train net output #2: prob = 0.859143 (* 1 = 0.859143 loss) +I0113 02:50:05.717077 12987 sgd_solver.cpp:106] Iteration 221000, lr = 0.01 +I0113 02:58:52.272750 12987 solver.cpp:228] Iteration 222000, loss = 1.34939 +I0113 02:58:52.273028 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 02:58:52.273067 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 02:58:52.273087 12987 solver.cpp:244] Train net output #2: prob = 1.28318 (* 1 = 1.28318 loss) +I0113 02:58:52.525645 12987 sgd_solver.cpp:106] Iteration 222000, lr = 0.01 +I0113 03:07:39.497016 12987 solver.cpp:228] Iteration 223000, loss = 1.33374 +I0113 03:07:39.497228 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 03:07:39.497241 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 03:07:39.497254 12987 solver.cpp:244] Train net output #2: prob = 1.03466 (* 1 = 1.03466 loss) +I0113 03:07:39.747253 12987 sgd_solver.cpp:106] Iteration 223000, lr = 0.01 +I0113 03:16:26.236302 12987 solver.cpp:228] Iteration 224000, loss = 1.33009 +I0113 03:16:26.236567 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 03:16:26.236611 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 03:16:26.236637 12987 solver.cpp:244] Train net output #2: prob = 0.972648 (* 1 = 0.972648 loss) +I0113 03:16:26.488406 12987 sgd_solver.cpp:106] Iteration 224000, lr = 0.01 +I0113 03:25:12.775480 12987 solver.cpp:228] Iteration 225000, loss = 1.31773 +I0113 03:25:12.775665 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 03:25:12.775676 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 03:25:12.775687 12987 solver.cpp:244] Train net output #2: prob = 1.5967 (* 1 = 1.5967 loss) +I0113 03:25:13.030077 12987 sgd_solver.cpp:106] Iteration 225000, lr = 0.01 +I0113 03:33:59.204067 12987 solver.cpp:228] Iteration 226000, loss = 1.28698 +I0113 03:33:59.204349 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 03:33:59.204392 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 03:33:59.204416 12987 solver.cpp:244] Train net output #2: prob = 1.08313 (* 1 = 1.08313 loss) +I0113 03:33:59.459646 12987 sgd_solver.cpp:106] Iteration 226000, lr = 0.01 +I0113 03:42:45.791404 12987 solver.cpp:228] Iteration 227000, loss = 1.26875 +I0113 03:42:45.791640 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 03:42:45.791674 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 03:42:45.791698 12987 solver.cpp:244] Train net output #2: prob = 1.22193 (* 1 = 1.22193 loss) +I0113 03:42:46.045887 12987 sgd_solver.cpp:106] Iteration 227000, lr = 0.01 +I0113 03:51:32.331653 12987 
solver.cpp:228] Iteration 228000, loss = 1.2431 +I0113 03:51:32.331945 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 03:51:32.331964 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 03:51:32.331998 12987 solver.cpp:244] Train net output #2: prob = 1.21946 (* 1 = 1.21946 loss) +I0113 03:51:32.585003 12987 sgd_solver.cpp:106] Iteration 228000, lr = 0.01 +I0113 04:00:19.219545 12987 solver.cpp:228] Iteration 229000, loss = 1.22813 +I0113 04:00:19.219753 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 04:00:19.219781 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 04:00:19.219794 12987 solver.cpp:244] Train net output #2: prob = 1.05878 (* 1 = 1.05878 loss) +I0113 04:00:19.474525 12987 sgd_solver.cpp:106] Iteration 229000, lr = 0.01 +I0113 04:09:05.693081 12987 solver.cpp:228] Iteration 230000, loss = 1.19176 +I0113 04:09:05.693296 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 04:09:05.693311 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 04:09:05.693325 12987 solver.cpp:244] Train net output #2: prob = 1.37122 (* 1 = 1.37122 loss) +I0113 04:09:05.946048 12987 sgd_solver.cpp:106] Iteration 230000, lr = 0.01 +I0113 04:17:52.389459 12987 solver.cpp:228] Iteration 231000, loss = 1.31255 +I0113 04:17:52.389698 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 04:17:52.389739 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 04:17:52.389765 12987 solver.cpp:244] Train net output #2: prob = 1.29195 (* 1 = 1.29195 loss) +I0113 04:17:52.637532 12987 sgd_solver.cpp:106] Iteration 231000, lr = 0.01 +I0113 04:26:39.166970 12987 solver.cpp:228] Iteration 232000, loss = 1.30271 +I0113 04:26:39.167237 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 04:26:39.167253 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 04:26:39.167266 12987 solver.cpp:244] Train net output #2: prob = 1.44261 (* 1 = 1.44261 loss) +I0113 04:26:39.421442 12987 sgd_solver.cpp:106] Iteration 232000, lr = 0.01 +I0113 04:35:25.871158 12987 solver.cpp:228] Iteration 233000, loss = 1.30962 +I0113 04:35:25.871387 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 04:35:25.871425 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 04:35:25.871444 12987 solver.cpp:244] Train net output #2: prob = 1.24858 (* 1 = 1.24858 loss) +I0113 04:35:26.121652 12987 sgd_solver.cpp:106] Iteration 233000, lr = 0.01 +I0113 04:44:12.278975 12987 solver.cpp:228] Iteration 234000, loss = 1.31456 +I0113 04:44:12.279188 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 04:44:12.279206 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 04:44:12.279223 12987 solver.cpp:244] Train net output #2: prob = 0.95054 (* 1 = 0.95054 loss) +I0113 04:44:12.526366 12987 sgd_solver.cpp:106] Iteration 234000, lr = 0.01 +I0113 04:52:59.003296 12987 solver.cpp:228] Iteration 235000, loss = 1.30003 +I0113 04:52:59.003526 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 04:52:59.003556 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 04:52:59.004197 12987 solver.cpp:244] Train net output #2: prob = 1.62236 (* 1 = 1.62236 loss) +I0113 04:52:59.253420 12987 sgd_solver.cpp:106] Iteration 235000, lr = 0.01 +I0113 05:01:45.522161 12987 solver.cpp:228] Iteration 236000, loss = 1.26287 +I0113 05:01:45.522460 12987 solver.cpp:244] Train net output #0: 
accuracy@1 = 0.75 +I0113 05:01:45.522501 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 05:01:45.522531 12987 solver.cpp:244] Train net output #2: prob = 0.85389 (* 1 = 0.85389 loss) +I0113 05:01:45.775046 12987 sgd_solver.cpp:106] Iteration 236000, lr = 0.01 +I0113 05:10:31.943557 12987 solver.cpp:228] Iteration 237000, loss = 1.24723 +I0113 05:10:31.943765 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 05:10:31.943776 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 05:10:31.943789 12987 solver.cpp:244] Train net output #2: prob = 2.11464 (* 1 = 2.11464 loss) +I0113 05:10:32.197291 12987 sgd_solver.cpp:106] Iteration 237000, lr = 0.01 +I0113 05:19:18.387691 12987 solver.cpp:228] Iteration 238000, loss = 1.23776 +I0113 05:19:18.387902 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 05:19:18.387912 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 05:19:18.387923 12987 solver.cpp:244] Train net output #2: prob = 2.45971 (* 1 = 2.45971 loss) +I0113 05:19:18.640322 12987 sgd_solver.cpp:106] Iteration 238000, lr = 0.01 +I0113 05:28:04.804734 12987 solver.cpp:228] Iteration 239000, loss = 1.2213 +I0113 05:28:04.804992 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 05:28:04.805039 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 05:28:04.805089 12987 solver.cpp:244] Train net output #2: prob = 1.10488 (* 1 = 1.10488 loss) +I0113 05:28:05.059911 12987 sgd_solver.cpp:106] Iteration 239000, lr = 0.01 +I0113 05:36:51.215526 12987 solver.cpp:228] Iteration 240000, loss = 1.18713 +I0113 05:36:51.215749 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 05:36:51.215787 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 05:36:51.215811 12987 solver.cpp:244] Train net output #2: prob = 1.094 (* 1 = 1.094 loss) +I0113 05:36:51.468487 12987 sgd_solver.cpp:106] Iteration 240000, lr = 0.01 +I0113 05:45:37.323844 12987 solver.cpp:228] Iteration 241000, loss = 1.28348 +I0113 05:45:37.324072 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 05:45:37.324097 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 05:45:37.324108 12987 solver.cpp:244] Train net output #2: prob = 1.78872 (* 1 = 1.78872 loss) +I0113 05:45:37.576328 12987 sgd_solver.cpp:106] Iteration 241000, lr = 0.01 +I0113 05:54:23.614214 12987 solver.cpp:228] Iteration 242000, loss = 1.26155 +I0113 05:54:23.614430 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 05:54:23.614454 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 05:54:23.614483 12987 solver.cpp:244] Train net output #2: prob = 1.14595 (* 1 = 1.14595 loss) +I0113 05:54:23.868149 12987 sgd_solver.cpp:106] Iteration 242000, lr = 0.01 +I0113 06:03:09.885542 12987 solver.cpp:228] Iteration 243000, loss = 1.27098 +I0113 06:03:09.885795 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 06:03:09.885834 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 06:03:09.885856 12987 solver.cpp:244] Train net output #2: prob = 1.23865 (* 1 = 1.23865 loss) +I0113 06:03:10.140151 12987 sgd_solver.cpp:106] Iteration 243000, lr = 0.01 +I0113 06:11:55.816872 12987 solver.cpp:228] Iteration 244000, loss = 1.27582 +I0113 06:11:55.817117 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 06:11:55.817158 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 
06:11:55.817203 12987 solver.cpp:244] Train net output #2: prob = 0.903844 (* 1 = 0.903844 loss) +I0113 06:11:56.067342 12987 sgd_solver.cpp:106] Iteration 244000, lr = 0.01 +I0113 06:20:41.996896 12987 solver.cpp:228] Iteration 245000, loss = 1.24795 +I0113 06:20:41.997105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 06:20:41.997131 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 06:20:41.997143 12987 solver.cpp:244] Train net output #2: prob = 1.75664 (* 1 = 1.75664 loss) +I0113 06:20:42.247391 12987 sgd_solver.cpp:106] Iteration 245000, lr = 0.01 +I0113 06:29:27.525454 12987 solver.cpp:228] Iteration 246000, loss = 1.26031 +I0113 06:29:27.525672 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 06:29:27.525696 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 06:29:27.525712 12987 solver.cpp:244] Train net output #2: prob = 0.803063 (* 1 = 0.803063 loss) +I0113 06:29:27.779861 12987 sgd_solver.cpp:106] Iteration 246000, lr = 0.01 +I0113 06:38:13.367698 12987 solver.cpp:228] Iteration 247000, loss = 1.22792 +I0113 06:38:13.367945 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 06:38:13.367983 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 06:38:13.368023 12987 solver.cpp:244] Train net output #2: prob = 1.27622 (* 1 = 1.27622 loss) +I0113 06:38:13.620353 12987 sgd_solver.cpp:106] Iteration 247000, lr = 0.01 +I0113 06:46:59.310748 12987 solver.cpp:228] Iteration 248000, loss = 1.22755 +I0113 06:46:59.310961 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 06:46:59.310993 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 06:46:59.311014 12987 solver.cpp:244] Train net output #2: prob = 1.47023 (* 1 = 1.47023 loss) +I0113 06:46:59.561745 12987 sgd_solver.cpp:106] Iteration 248000, lr = 0.01 +I0113 06:55:45.281949 12987 solver.cpp:228] Iteration 249000, loss = 1.21218 +I0113 06:55:45.283423 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 06:55:45.283447 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 06:55:45.283463 12987 solver.cpp:244] Train net output #2: prob = 1.2407 (* 1 = 1.2407 loss) +I0113 06:55:45.539278 12987 sgd_solver.cpp:106] Iteration 249000, lr = 0.01 +I0113 07:04:30.810657 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_250000.caffemodel +I0113 07:04:33.112649 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_250000.solverstate +I0113 07:04:33.520584 12987 solver.cpp:228] Iteration 250000, loss = 1.19218 +I0113 07:04:33.520622 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 07:04:33.520628 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 07:04:33.520638 12987 solver.cpp:244] Train net output #2: prob = 0.870448 (* 1 = 0.870448 loss) +I0113 07:04:33.760628 12987 sgd_solver.cpp:106] Iteration 250000, lr = 0.01 +I0113 07:13:19.657833 12987 solver.cpp:228] Iteration 251000, loss = 1.24747 +I0113 07:13:19.658061 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 07:13:19.658095 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 07:13:19.658107 12987 solver.cpp:244] Train net output #2: prob = 0.74993 (* 1 = 0.74993 loss) +I0113 07:13:19.911877 12987 sgd_solver.cpp:106] Iteration 251000, lr = 0.01 +I0113 
07:22:05.575304 12987 solver.cpp:228] Iteration 252000, loss = 1.25184 +I0113 07:22:05.575575 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 07:22:05.575618 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 07:22:05.575637 12987 solver.cpp:244] Train net output #2: prob = 0.990222 (* 1 = 0.990222 loss) +I0113 07:22:05.825117 12987 sgd_solver.cpp:106] Iteration 252000, lr = 0.01 +I0113 07:30:51.565696 12987 solver.cpp:228] Iteration 253000, loss = 1.25514 +I0113 07:30:51.565932 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 07:30:51.565968 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 07:30:51.565996 12987 solver.cpp:244] Train net output #2: prob = 1.22691 (* 1 = 1.22691 loss) +I0113 07:30:51.820338 12987 sgd_solver.cpp:106] Iteration 253000, lr = 0.01 +I0113 07:39:37.766911 12987 solver.cpp:228] Iteration 254000, loss = 1.25293 +I0113 07:39:37.767159 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 07:39:37.767189 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 07:39:37.767199 12987 solver.cpp:244] Train net output #2: prob = 1.60885 (* 1 = 1.60885 loss) +I0113 07:39:38.020870 12987 sgd_solver.cpp:106] Iteration 254000, lr = 0.01 +I0113 07:48:23.785349 12987 solver.cpp:228] Iteration 255000, loss = 1.25176 +I0113 07:48:23.785584 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 07:48:23.785609 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 07:48:23.785627 12987 solver.cpp:244] Train net output #2: prob = 1.3191 (* 1 = 1.3191 loss) +I0113 07:48:24.035126 12987 sgd_solver.cpp:106] Iteration 255000, lr = 0.01 +I0113 07:57:09.926131 12987 solver.cpp:228] Iteration 256000, loss = 1.25095 +I0113 07:57:09.926398 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 07:57:09.926443 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 07:57:09.926462 12987 solver.cpp:244] Train net output #2: prob = 1.32173 (* 1 = 1.32173 loss) +I0113 07:57:10.179149 12987 sgd_solver.cpp:106] Iteration 256000, lr = 0.01 +I0113 08:05:55.788213 12987 solver.cpp:228] Iteration 257000, loss = 1.21804 +I0113 08:05:55.788496 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 08:05:55.788538 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 08:05:55.788565 12987 solver.cpp:244] Train net output #2: prob = 1.67004 (* 1 = 1.67004 loss) +I0113 08:05:56.045209 12987 sgd_solver.cpp:106] Iteration 257000, lr = 0.01 +I0113 08:14:42.008375 12987 solver.cpp:228] Iteration 258000, loss = 1.22726 +I0113 08:14:42.008641 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 08:14:42.008668 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 08:14:42.008694 12987 solver.cpp:244] Train net output #2: prob = 1.04847 (* 1 = 1.04847 loss) +I0113 08:14:42.260766 12987 sgd_solver.cpp:106] Iteration 258000, lr = 0.01 +I0113 08:23:27.794137 12987 solver.cpp:228] Iteration 259000, loss = 1.20506 +I0113 08:23:27.794363 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 08:23:27.794402 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 08:23:27.794422 12987 solver.cpp:244] Train net output #2: prob = 1.62555 (* 1 = 1.62555 loss) +I0113 08:23:28.046836 12987 sgd_solver.cpp:106] Iteration 259000, lr = 0.01 +I0113 08:32:13.909174 12987 solver.cpp:228] Iteration 260000, loss = 1.19341 +I0113 08:32:13.909376 12987 solver.cpp:244] 
Train net output #0: accuracy@1 = 0.53125 +I0113 08:32:13.909387 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 08:32:13.909401 12987 solver.cpp:244] Train net output #2: prob = 1.12087 (* 1 = 1.12087 loss) +I0113 08:32:14.164343 12987 sgd_solver.cpp:106] Iteration 260000, lr = 0.01 +I0113 08:41:00.103637 12987 solver.cpp:228] Iteration 261000, loss = 1.25345 +I0113 08:41:00.103919 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 08:41:00.103957 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 08:41:00.103978 12987 solver.cpp:244] Train net output #2: prob = 0.520669 (* 1 = 0.520669 loss) +I0113 08:41:00.353212 12987 sgd_solver.cpp:106] Iteration 261000, lr = 0.01 +I0113 08:49:46.052031 12987 solver.cpp:228] Iteration 262000, loss = 1.26582 +I0113 08:49:46.052258 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 08:49:46.052271 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 08:49:46.052287 12987 solver.cpp:244] Train net output #2: prob = 1.54288 (* 1 = 1.54288 loss) +I0113 08:49:46.304280 12987 sgd_solver.cpp:106] Iteration 262000, lr = 0.01 +I0113 08:58:32.324690 12987 solver.cpp:228] Iteration 263000, loss = 1.25156 +I0113 08:58:32.324892 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 08:58:32.324903 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 08:58:32.324914 12987 solver.cpp:244] Train net output #2: prob = 1.09032 (* 1 = 1.09032 loss) +I0113 08:58:32.576593 12987 sgd_solver.cpp:106] Iteration 263000, lr = 0.01 +I0113 09:07:18.470566 12987 solver.cpp:228] Iteration 264000, loss = 1.25275 +I0113 09:07:18.470732 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 09:07:18.470744 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 09:07:18.470754 12987 solver.cpp:244] Train net output #2: prob = 0.865524 (* 1 = 0.865524 loss) +I0113 09:07:18.723318 12987 sgd_solver.cpp:106] Iteration 264000, lr = 0.01 +I0113 09:16:04.970250 12987 solver.cpp:228] Iteration 265000, loss = 1.25583 +I0113 09:16:04.970512 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 09:16:04.970554 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 09:16:04.970579 12987 solver.cpp:244] Train net output #2: prob = 1.46396 (* 1 = 1.46396 loss) +I0113 09:16:05.222389 12987 sgd_solver.cpp:106] Iteration 265000, lr = 0.01 +I0113 09:24:51.219346 12987 solver.cpp:228] Iteration 266000, loss = 1.23323 +I0113 09:24:51.219630 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 09:24:51.219657 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 09:24:51.219672 12987 solver.cpp:244] Train net output #2: prob = 0.866596 (* 1 = 0.866596 loss) +I0113 09:24:51.473219 12987 sgd_solver.cpp:106] Iteration 266000, lr = 0.01 +I0113 09:33:37.298789 12987 solver.cpp:228] Iteration 267000, loss = 1.21543 +I0113 09:33:37.298956 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 09:33:37.298966 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 09:33:37.298977 12987 solver.cpp:244] Train net output #2: prob = 1.44336 (* 1 = 1.44336 loss) +I0113 09:33:37.553930 12987 sgd_solver.cpp:106] Iteration 267000, lr = 0.01 +I0113 09:42:23.821993 12987 solver.cpp:228] Iteration 268000, loss = 1.21569 +I0113 09:42:23.822782 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0113 09:42:23.822803 12987 solver.cpp:244] Train net output #1: accuracy@5 = 
0.875 +I0113 09:42:23.822823 12987 solver.cpp:244] Train net output #2: prob = 1.39739 (* 1 = 1.39739 loss) +I0113 09:42:24.072842 12987 sgd_solver.cpp:106] Iteration 268000, lr = 0.01 +I0113 09:51:10.033972 12987 solver.cpp:228] Iteration 269000, loss = 1.22219 +I0113 09:51:10.034219 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 09:51:10.034248 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 09:51:10.034260 12987 solver.cpp:244] Train net output #2: prob = 1.43177 (* 1 = 1.43177 loss) +I0113 09:51:10.288125 12987 sgd_solver.cpp:106] Iteration 269000, lr = 0.01 +I0113 09:59:56.344357 12987 solver.cpp:228] Iteration 270000, loss = 1.20636 +I0113 09:59:56.344559 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 09:59:56.344589 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 09:59:56.344605 12987 solver.cpp:244] Train net output #2: prob = 0.73236 (* 1 = 0.73236 loss) +I0113 09:59:56.598299 12987 sgd_solver.cpp:106] Iteration 270000, lr = 0.01 +I0113 10:08:42.477620 12987 solver.cpp:228] Iteration 271000, loss = 1.25091 +I0113 10:08:42.477831 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 10:08:42.477869 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 10:08:42.477892 12987 solver.cpp:244] Train net output #2: prob = 0.9692 (* 1 = 0.9692 loss) +I0113 10:08:42.727339 12987 sgd_solver.cpp:106] Iteration 271000, lr = 0.01 +I0113 10:17:28.063240 12987 solver.cpp:228] Iteration 272000, loss = 1.24825 +I0113 10:17:28.063426 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 10:17:28.063437 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 10:17:28.063447 12987 solver.cpp:244] Train net output #2: prob = 1.2951 (* 1 = 1.2951 loss) +I0113 10:17:28.315269 12987 sgd_solver.cpp:106] Iteration 272000, lr = 0.01 +I0113 10:26:14.125900 12987 solver.cpp:228] Iteration 273000, loss = 1.26352 +I0113 10:26:14.126106 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 10:26:14.126116 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 10:26:14.126127 12987 solver.cpp:244] Train net output #2: prob = 1.10784 (* 1 = 1.10784 loss) +I0113 10:26:14.380060 12987 sgd_solver.cpp:106] Iteration 273000, lr = 0.01 +I0113 10:35:00.589061 12987 solver.cpp:228] Iteration 274000, loss = 1.26934 +I0113 10:35:00.589316 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 10:35:00.589346 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 10:35:00.589362 12987 solver.cpp:244] Train net output #2: prob = 1.59843 (* 1 = 1.59843 loss) +I0113 10:35:00.844221 12987 sgd_solver.cpp:106] Iteration 274000, lr = 0.01 +I0113 10:43:47.058524 12987 solver.cpp:228] Iteration 275000, loss = 1.25683 +I0113 10:43:47.058786 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 10:43:47.058821 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 10:43:47.058845 12987 solver.cpp:244] Train net output #2: prob = 1.02752 (* 1 = 1.02752 loss) +I0113 10:43:47.312755 12987 sgd_solver.cpp:106] Iteration 275000, lr = 0.01 +I0113 10:52:33.877833 12987 solver.cpp:228] Iteration 276000, loss = 1.24735 +I0113 10:52:33.878108 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 10:52:33.878144 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 10:52:33.878186 12987 solver.cpp:244] Train net output #2: prob = 1.16413 (* 1 = 1.16413 loss) +I0113 10:52:34.130184 
12987 sgd_solver.cpp:106] Iteration 276000, lr = 0.01 +I0113 11:01:19.918644 12987 solver.cpp:228] Iteration 277000, loss = 1.23251 +I0113 11:01:19.918860 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 11:01:19.918889 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 11:01:19.918903 12987 solver.cpp:244] Train net output #2: prob = 0.894585 (* 1 = 0.894585 loss) +I0113 11:01:20.171687 12987 sgd_solver.cpp:106] Iteration 277000, lr = 0.01 +I0113 11:10:06.389178 12987 solver.cpp:228] Iteration 278000, loss = 1.21942 +I0113 11:10:06.389385 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 11:10:06.389396 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 11:10:06.389406 12987 solver.cpp:244] Train net output #2: prob = 1.11574 (* 1 = 1.11574 loss) +I0113 11:10:06.644887 12987 sgd_solver.cpp:106] Iteration 278000, lr = 0.01 +I0113 11:18:52.549687 12987 solver.cpp:228] Iteration 279000, loss = 1.22528 +I0113 11:18:52.549955 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 11:18:52.549993 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 11:18:52.550019 12987 solver.cpp:244] Train net output #2: prob = 1.23603 (* 1 = 1.23603 loss) +I0113 11:18:52.803555 12987 sgd_solver.cpp:106] Iteration 279000, lr = 0.01 +I0113 11:27:38.682302 12987 solver.cpp:228] Iteration 280000, loss = 1.19511 +I0113 11:27:38.682482 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 11:27:38.682492 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 11:27:38.682502 12987 solver.cpp:244] Train net output #2: prob = 1.80496 (* 1 = 1.80496 loss) +I0113 11:27:38.933605 12987 sgd_solver.cpp:106] Iteration 280000, lr = 0.01 +I0113 11:36:24.956691 12987 solver.cpp:228] Iteration 281000, loss = 1.23171 +I0113 11:36:24.957006 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 11:36:24.957034 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 11:36:24.957046 12987 solver.cpp:244] Train net output #2: prob = 1.51849 (* 1 = 1.51849 loss) +I0113 11:36:25.211081 12987 sgd_solver.cpp:106] Iteration 281000, lr = 0.01 +I0113 11:45:10.770249 12987 solver.cpp:228] Iteration 282000, loss = 1.23134 +I0113 11:45:10.770550 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 11:45:10.770594 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 11:45:10.770620 12987 solver.cpp:244] Train net output #2: prob = 1.78849 (* 1 = 1.78849 loss) +I0113 11:45:11.023458 12987 sgd_solver.cpp:106] Iteration 282000, lr = 0.01 +I0113 11:53:57.000624 12987 solver.cpp:228] Iteration 283000, loss = 1.23974 +I0113 11:53:57.000835 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0113 11:53:57.000885 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 11:53:57.000901 12987 solver.cpp:244] Train net output #2: prob = 1.9392 (* 1 = 1.9392 loss) +I0113 11:53:57.256047 12987 sgd_solver.cpp:106] Iteration 283000, lr = 0.01 +I0113 12:02:42.883471 12987 solver.cpp:228] Iteration 284000, loss = 1.25207 +I0113 12:02:42.883679 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 12:02:42.883692 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 12:02:42.883703 12987 solver.cpp:244] Train net output #2: prob = 1.2289 (* 1 = 1.2289 loss) +I0113 12:02:43.130710 12987 sgd_solver.cpp:106] Iteration 284000, lr = 0.01 +I0113 12:11:28.834215 12987 solver.cpp:228] Iteration 285000, loss 
= 1.23107 +I0113 12:11:28.834528 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 12:11:28.834574 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 12:11:28.834599 12987 solver.cpp:244] Train net output #2: prob = 1.52869 (* 1 = 1.52869 loss) +I0113 12:11:29.088292 12987 sgd_solver.cpp:106] Iteration 285000, lr = 0.01 +I0113 12:20:15.494184 12987 solver.cpp:228] Iteration 286000, loss = 1.2357 +I0113 12:20:15.494398 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 12:20:15.494410 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 12:20:15.494421 12987 solver.cpp:244] Train net output #2: prob = 1.71322 (* 1 = 1.71322 loss) +I0113 12:20:15.748373 12987 sgd_solver.cpp:106] Iteration 286000, lr = 0.01 +I0113 12:29:02.122472 12987 solver.cpp:228] Iteration 287000, loss = 1.21345 +I0113 12:29:02.122715 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 12:29:02.122752 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 12:29:02.122771 12987 solver.cpp:244] Train net output #2: prob = 1.44987 (* 1 = 1.44987 loss) +I0113 12:29:02.377635 12987 sgd_solver.cpp:106] Iteration 287000, lr = 0.01 +I0113 12:37:48.413942 12987 solver.cpp:228] Iteration 288000, loss = 1.21529 +I0113 12:37:48.414192 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 12:37:48.414219 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 12:37:48.414238 12987 solver.cpp:244] Train net output #2: prob = 1.65056 (* 1 = 1.65056 loss) +I0113 12:37:48.666592 12987 sgd_solver.cpp:106] Iteration 288000, lr = 0.01 +I0113 12:46:34.725132 12987 solver.cpp:228] Iteration 289000, loss = 1.20359 +I0113 12:46:34.725338 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 12:46:34.725350 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 12:46:34.725363 12987 solver.cpp:244] Train net output #2: prob = 1.54142 (* 1 = 1.54142 loss) +I0113 12:46:34.981392 12987 sgd_solver.cpp:106] Iteration 289000, lr = 0.01 +I0113 12:55:20.927970 12987 solver.cpp:228] Iteration 290000, loss = 1.19962 +I0113 12:55:20.928212 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 12:55:20.928248 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 12:55:20.928272 12987 solver.cpp:244] Train net output #2: prob = 1.08725 (* 1 = 1.08725 loss) +I0113 12:55:21.182312 12987 sgd_solver.cpp:106] Iteration 290000, lr = 0.01 +I0113 13:04:07.747860 12987 solver.cpp:228] Iteration 291000, loss = 1.2233 +I0113 13:04:07.748105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 13:04:07.748137 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 13:04:07.748157 12987 solver.cpp:244] Train net output #2: prob = 1.16549 (* 1 = 1.16549 loss) +I0113 13:04:07.992666 12987 sgd_solver.cpp:106] Iteration 291000, lr = 0.01 +I0113 13:12:54.319555 12987 solver.cpp:228] Iteration 292000, loss = 1.23683 +I0113 13:12:54.319753 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 13:12:54.319764 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 13:12:54.319777 12987 solver.cpp:244] Train net output #2: prob = 1.44235 (* 1 = 1.44235 loss) +I0113 13:12:54.573596 12987 sgd_solver.cpp:106] Iteration 292000, lr = 0.01 +I0113 13:21:40.767418 12987 solver.cpp:228] Iteration 293000, loss = 1.23333 +I0113 13:21:40.767709 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 13:21:40.767756 
12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 13:21:40.767786 12987 solver.cpp:244] Train net output #2: prob = 1.6433 (* 1 = 1.6433 loss) +I0113 13:21:41.019278 12987 sgd_solver.cpp:106] Iteration 293000, lr = 0.01 +I0113 13:30:27.144979 12987 solver.cpp:228] Iteration 294000, loss = 1.2312 +I0113 13:30:27.145309 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 13:30:27.145356 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 13:30:27.145380 12987 solver.cpp:244] Train net output #2: prob = 1.58549 (* 1 = 1.58549 loss) +I0113 13:30:27.387085 12987 sgd_solver.cpp:106] Iteration 294000, lr = 0.01 +I0113 13:39:13.397939 12987 solver.cpp:228] Iteration 295000, loss = 1.23164 +I0113 13:39:13.398197 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 13:39:13.398236 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 13:39:13.398273 12987 solver.cpp:244] Train net output #2: prob = 0.901861 (* 1 = 0.901861 loss) +I0113 13:39:13.653273 12987 sgd_solver.cpp:106] Iteration 295000, lr = 0.01 +I0113 13:47:59.542991 12987 solver.cpp:228] Iteration 296000, loss = 1.23499 +I0113 13:47:59.543241 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 13:47:59.543267 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 13:47:59.543278 12987 solver.cpp:244] Train net output #2: prob = 1.02794 (* 1 = 1.02794 loss) +I0113 13:47:59.793212 12987 sgd_solver.cpp:106] Iteration 296000, lr = 0.01 +I0113 13:56:46.638969 12987 solver.cpp:228] Iteration 297000, loss = 1.22102 +I0113 13:56:46.639257 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 13:56:46.639297 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 13:56:46.639317 12987 solver.cpp:244] Train net output #2: prob = 1.23941 (* 1 = 1.23941 loss) +I0113 13:56:46.893441 12987 sgd_solver.cpp:106] Iteration 297000, lr = 0.01 +I0113 14:05:33.214902 12987 solver.cpp:228] Iteration 298000, loss = 1.22251 +I0113 14:05:33.215241 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 14:05:33.215283 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 14:05:33.215309 12987 solver.cpp:244] Train net output #2: prob = 0.791349 (* 1 = 0.791349 loss) +I0113 14:05:33.471515 12987 sgd_solver.cpp:106] Iteration 298000, lr = 0.01 +I0113 14:14:19.826686 12987 solver.cpp:228] Iteration 299000, loss = 1.20224 +I0113 14:14:19.826895 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.40625 +I0113 14:14:19.826906 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0113 14:14:19.826918 12987 solver.cpp:244] Train net output #2: prob = 2.6968 (* 1 = 2.6968 loss) +I0113 14:14:20.079358 12987 sgd_solver.cpp:106] Iteration 299000, lr = 0.01 +I0113 14:23:06.402837 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_300000.caffemodel +I0113 14:23:08.439616 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_300000.solverstate +I0113 14:23:08.845746 12987 solver.cpp:228] Iteration 300000, loss = 1.20895 +I0113 14:23:08.845796 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 14:23:08.845803 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 14:23:08.845811 12987 solver.cpp:244] Train net output #2: prob = 1.33592 (* 1 = 1.33592 loss) +I0113 
14:23:09.091737 12987 sgd_solver.cpp:106] Iteration 300000, lr = 0.01 +I0113 14:31:55.853754 12987 solver.cpp:228] Iteration 301000, loss = 1.22676 +I0113 14:31:55.853991 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 14:31:55.854023 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 14:31:55.854044 12987 solver.cpp:244] Train net output #2: prob = 1.26651 (* 1 = 1.26651 loss) +I0113 14:31:56.105810 12987 sgd_solver.cpp:106] Iteration 301000, lr = 0.01 +I0113 14:40:42.537041 12987 solver.cpp:228] Iteration 302000, loss = 1.23223 +I0113 14:40:42.537251 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 14:40:42.537262 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 14:40:42.537274 12987 solver.cpp:244] Train net output #2: prob = 0.944487 (* 1 = 0.944487 loss) +I0113 14:40:42.792796 12987 sgd_solver.cpp:106] Iteration 302000, lr = 0.01 +I0113 14:49:29.088863 12987 solver.cpp:228] Iteration 303000, loss = 1.21813 +I0113 14:49:29.089097 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 14:49:29.089110 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 14:49:29.089125 12987 solver.cpp:244] Train net output #2: prob = 1.59069 (* 1 = 1.59069 loss) +I0113 14:49:29.341650 12987 sgd_solver.cpp:106] Iteration 303000, lr = 0.01 +I0113 14:58:15.624565 12987 solver.cpp:228] Iteration 304000, loss = 1.22212 +I0113 14:58:15.624771 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 14:58:15.624797 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 14:58:15.624812 12987 solver.cpp:244] Train net output #2: prob = 1.34007 (* 1 = 1.34007 loss) +I0113 14:58:15.879252 12987 sgd_solver.cpp:106] Iteration 304000, lr = 0.01 +I0113 15:07:02.355546 12987 solver.cpp:228] Iteration 305000, loss = 1.23268 +I0113 15:07:02.355836 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 15:07:02.355875 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 15:07:02.355898 12987 solver.cpp:244] Train net output #2: prob = 0.934368 (* 1 = 0.934368 loss) +I0113 15:07:02.609004 12987 sgd_solver.cpp:106] Iteration 305000, lr = 0.01 +I0113 15:15:49.252491 12987 solver.cpp:228] Iteration 306000, loss = 1.22235 +I0113 15:15:49.252753 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 15:15:49.252791 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 15:15:49.252813 12987 solver.cpp:244] Train net output #2: prob = 0.672921 (* 1 = 0.672921 loss) +I0113 15:15:49.506537 12987 sgd_solver.cpp:106] Iteration 306000, lr = 0.01 +I0113 15:24:35.774852 12987 solver.cpp:228] Iteration 307000, loss = 1.20382 +I0113 15:24:35.775085 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 15:24:35.775118 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 15:24:35.775135 12987 solver.cpp:244] Train net output #2: prob = 1.05013 (* 1 = 1.05013 loss) +I0113 15:24:36.027549 12987 sgd_solver.cpp:106] Iteration 307000, lr = 0.01 +I0113 15:33:22.811774 12987 solver.cpp:228] Iteration 308000, loss = 1.20555 +I0113 15:33:22.812024 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 15:33:22.812067 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 15:33:22.812090 12987 solver.cpp:244] Train net output #2: prob = 1.44018 (* 1 = 1.44018 loss) +I0113 15:33:23.067042 12987 sgd_solver.cpp:106] Iteration 308000, lr = 0.01 +I0113 15:42:09.582458 12987 
solver.cpp:228] Iteration 309000, loss = 1.20114 +I0113 15:42:09.582741 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 15:42:09.582784 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 15:42:09.582811 12987 solver.cpp:244] Train net output #2: prob = 1.35641 (* 1 = 1.35641 loss) +I0113 15:42:09.833171 12987 sgd_solver.cpp:106] Iteration 309000, lr = 0.01 +I0113 15:50:56.780671 12987 solver.cpp:228] Iteration 310000, loss = 1.20264 +I0113 15:50:56.780879 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 15:50:56.780889 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 15:50:56.780900 12987 solver.cpp:244] Train net output #2: prob = 1.13025 (* 1 = 1.13025 loss) +I0113 15:50:57.034526 12987 sgd_solver.cpp:106] Iteration 310000, lr = 0.01 +I0113 15:59:43.743618 12987 solver.cpp:228] Iteration 311000, loss = 1.22017 +I0113 15:59:43.743863 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 15:59:43.743899 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 15:59:43.743921 12987 solver.cpp:244] Train net output #2: prob = 1.22157 (* 1 = 1.22157 loss) +I0113 15:59:43.996212 12987 sgd_solver.cpp:106] Iteration 311000, lr = 0.01 +I0113 16:08:30.158540 12987 solver.cpp:228] Iteration 312000, loss = 1.22589 +I0113 16:08:30.158766 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 16:08:30.158778 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 16:08:30.158793 12987 solver.cpp:244] Train net output #2: prob = 1.09854 (* 1 = 1.09854 loss) +I0113 16:08:30.409873 12987 sgd_solver.cpp:106] Iteration 312000, lr = 0.01 +I0113 16:17:17.259048 12987 solver.cpp:228] Iteration 313000, loss = 1.23562 +I0113 16:17:17.259320 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 16:17:17.259362 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 16:17:17.259382 12987 solver.cpp:244] Train net output #2: prob = 1.02349 (* 1 = 1.02349 loss) +I0113 16:17:17.515362 12987 sgd_solver.cpp:106] Iteration 313000, lr = 0.01 +I0113 16:26:03.837018 12987 solver.cpp:228] Iteration 314000, loss = 1.25397 +I0113 16:26:03.837182 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 16:26:03.837193 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 16:26:03.837203 12987 solver.cpp:244] Train net output #2: prob = 1.27746 (* 1 = 1.27746 loss) +I0113 16:26:04.090847 12987 sgd_solver.cpp:106] Iteration 314000, lr = 0.01 +I0113 16:34:50.622135 12987 solver.cpp:228] Iteration 315000, loss = 1.23668 +I0113 16:34:50.622493 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 16:34:50.622532 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.71875 +I0113 16:34:50.622557 12987 solver.cpp:244] Train net output #2: prob = 1.84027 (* 1 = 1.84027 loss) +I0113 16:34:50.878451 12987 sgd_solver.cpp:106] Iteration 315000, lr = 0.01 +I0113 16:43:37.070957 12987 solver.cpp:228] Iteration 316000, loss = 1.22738 +I0113 16:43:37.071208 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 16:43:37.071225 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 16:43:37.071247 12987 solver.cpp:244] Train net output #2: prob = 1.0361 (* 1 = 1.0361 loss) +I0113 16:43:37.323792 12987 sgd_solver.cpp:106] Iteration 316000, lr = 0.01 +I0113 16:52:23.738976 12987 solver.cpp:228] Iteration 317000, loss = 1.21211 +I0113 16:52:23.739213 12987 solver.cpp:244] Train net output #0: 
accuracy@1 = 0.75 +I0113 16:52:23.739231 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 16:52:23.739251 12987 solver.cpp:244] Train net output #2: prob = 1.14199 (* 1 = 1.14199 loss) +I0113 16:52:23.999377 12987 sgd_solver.cpp:106] Iteration 317000, lr = 0.01 +I0113 17:01:10.480005 12987 solver.cpp:228] Iteration 318000, loss = 1.20657 +I0113 17:01:10.480203 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 17:01:10.480213 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 17:01:10.480224 12987 solver.cpp:244] Train net output #2: prob = 1.17945 (* 1 = 1.17945 loss) +I0113 17:01:10.733697 12987 sgd_solver.cpp:106] Iteration 318000, lr = 0.01 +I0113 17:09:56.941123 12987 solver.cpp:228] Iteration 319000, loss = 1.20635 +I0113 17:09:56.941341 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 17:09:56.941365 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 17:09:56.941373 12987 solver.cpp:244] Train net output #2: prob = 0.67229 (* 1 = 0.67229 loss) +I0113 17:09:57.192453 12987 sgd_solver.cpp:106] Iteration 319000, lr = 0.01 +I0113 17:18:43.059006 12987 solver.cpp:228] Iteration 320000, loss = 1.17924 +I0113 17:18:43.059226 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 17:18:43.059239 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 17:18:43.059252 12987 solver.cpp:244] Train net output #2: prob = 1.21536 (* 1 = 1.21536 loss) +I0113 17:18:43.313714 12987 sgd_solver.cpp:106] Iteration 320000, lr = 0.01 +I0113 17:27:29.402705 12987 solver.cpp:228] Iteration 321000, loss = 1.21659 +I0113 17:27:29.403017 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 17:27:29.403056 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 17:27:29.403090 12987 solver.cpp:244] Train net output #2: prob = 1.31428 (* 1 = 1.31428 loss) +I0113 17:27:29.660598 12987 sgd_solver.cpp:106] Iteration 321000, lr = 0.01 +I0113 17:36:15.801506 12987 solver.cpp:228] Iteration 322000, loss = 1.20008 +I0113 17:36:15.801697 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 17:36:15.801709 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 17:36:15.801719 12987 solver.cpp:244] Train net output #2: prob = 1.42944 (* 1 = 1.42944 loss) +I0113 17:36:16.055821 12987 sgd_solver.cpp:106] Iteration 322000, lr = 0.01 +I0113 17:45:02.616139 12987 solver.cpp:228] Iteration 323000, loss = 1.20574 +I0113 17:45:02.616350 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 17:45:02.616371 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 17:45:02.616390 12987 solver.cpp:244] Train net output #2: prob = 1.2128 (* 1 = 1.2128 loss) +I0113 17:45:02.867318 12987 sgd_solver.cpp:106] Iteration 323000, lr = 0.01 +I0113 17:53:49.413987 12987 solver.cpp:228] Iteration 324000, loss = 1.21885 +I0113 17:53:49.414258 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 17:53:49.414302 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 17:53:49.414321 12987 solver.cpp:244] Train net output #2: prob = 1.04829 (* 1 = 1.04829 loss) +I0113 17:53:49.659776 12987 sgd_solver.cpp:106] Iteration 324000, lr = 0.01 +I0113 18:02:35.666582 12987 solver.cpp:228] Iteration 325000, loss = 1.20578 +I0113 18:02:35.666879 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 18:02:35.666920 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 
18:02:35.666940 12987 solver.cpp:244] Train net output #2: prob = 1.25949 (* 1 = 1.25949 loss) +I0113 18:02:35.920660 12987 sgd_solver.cpp:106] Iteration 325000, lr = 0.01 +I0113 18:11:22.026690 12987 solver.cpp:228] Iteration 326000, loss = 1.20756 +I0113 18:11:22.026969 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 18:11:22.027009 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 18:11:22.027026 12987 solver.cpp:244] Train net output #2: prob = 1.54684 (* 1 = 1.54684 loss) +I0113 18:11:22.278034 12987 sgd_solver.cpp:106] Iteration 326000, lr = 0.01 +I0113 18:20:08.540285 12987 solver.cpp:228] Iteration 327000, loss = 1.19152 +I0113 18:20:08.540796 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 18:20:08.540817 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 18:20:08.540835 12987 solver.cpp:244] Train net output #2: prob = 1.9424 (* 1 = 1.9424 loss) +I0113 18:20:08.791200 12987 sgd_solver.cpp:106] Iteration 327000, lr = 0.01 +I0113 18:28:55.000605 12987 solver.cpp:228] Iteration 328000, loss = 1.19689 +I0113 18:28:55.000885 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 18:28:55.000910 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 18:28:55.000926 12987 solver.cpp:244] Train net output #2: prob = 1.35998 (* 1 = 1.35998 loss) +I0113 18:28:55.254614 12987 sgd_solver.cpp:106] Iteration 328000, lr = 0.01 +I0113 18:37:41.153784 12987 solver.cpp:228] Iteration 329000, loss = 1.18806 +I0113 18:37:41.154018 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 18:37:41.154052 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 18:37:41.154072 12987 solver.cpp:244] Train net output #2: prob = 0.976341 (* 1 = 0.976341 loss) +I0113 18:37:41.408751 12987 sgd_solver.cpp:106] Iteration 329000, lr = 0.01 +I0113 18:46:27.273695 12987 solver.cpp:228] Iteration 330000, loss = 1.19397 +I0113 18:46:27.273954 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.53125 +I0113 18:46:27.273996 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 18:46:27.274018 12987 solver.cpp:244] Train net output #2: prob = 1.69404 (* 1 = 1.69404 loss) +I0113 18:46:27.523717 12987 sgd_solver.cpp:106] Iteration 330000, lr = 0.01 +I0113 18:55:13.454375 12987 solver.cpp:228] Iteration 331000, loss = 1.19257 +I0113 18:55:13.454615 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0113 18:55:13.454627 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 18:55:13.454638 12987 solver.cpp:244] Train net output #2: prob = 1.86369 (* 1 = 1.86369 loss) +I0113 18:55:13.708479 12987 sgd_solver.cpp:106] Iteration 331000, lr = 0.01 +I0113 19:03:59.545953 12987 solver.cpp:228] Iteration 332000, loss = 1.19889 +I0113 19:03:59.546174 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 19:03:59.546200 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 19:03:59.546213 12987 solver.cpp:244] Train net output #2: prob = 0.950485 (* 1 = 0.950485 loss) +I0113 19:03:59.797423 12987 sgd_solver.cpp:106] Iteration 332000, lr = 0.01 +I0113 19:12:45.378072 12987 solver.cpp:228] Iteration 333000, loss = 1.20366 +I0113 19:12:45.378363 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 19:12:45.378409 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 19:12:45.378458 12987 solver.cpp:244] Train net output #2: prob = 1.46047 (* 1 = 1.46047 loss) +I0113 19:12:45.632436 
12987 sgd_solver.cpp:106] Iteration 333000, lr = 0.01 +I0113 19:21:31.430539 12987 solver.cpp:228] Iteration 334000, loss = 1.2021 +I0113 19:21:31.430817 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 19:21:31.430855 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 19:21:31.430877 12987 solver.cpp:244] Train net output #2: prob = 1.12858 (* 1 = 1.12858 loss) +I0113 19:21:31.681341 12987 sgd_solver.cpp:106] Iteration 334000, lr = 0.01 +I0113 19:30:17.789579 12987 solver.cpp:228] Iteration 335000, loss = 1.19668 +I0113 19:30:17.789820 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 19:30:17.789856 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0113 19:30:17.789893 12987 solver.cpp:244] Train net output #2: prob = 1.57151 (* 1 = 1.57151 loss) +I0113 19:30:18.042345 12987 sgd_solver.cpp:106] Iteration 335000, lr = 0.01 +I0113 19:39:03.980819 12987 solver.cpp:228] Iteration 336000, loss = 1.2018 +I0113 19:39:03.981079 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 19:39:03.981106 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 19:39:03.981128 12987 solver.cpp:244] Train net output #2: prob = 1.29097 (* 1 = 1.29097 loss) +I0113 19:39:04.237478 12987 sgd_solver.cpp:106] Iteration 336000, lr = 0.01 +I0113 19:47:50.712486 12987 solver.cpp:228] Iteration 337000, loss = 1.19288 +I0113 19:47:50.712767 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 19:47:50.712805 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 19:47:50.712829 12987 solver.cpp:244] Train net output #2: prob = 1.59676 (* 1 = 1.59676 loss) +I0113 19:47:50.964398 12987 sgd_solver.cpp:106] Iteration 337000, lr = 0.01 +I0113 19:56:37.174288 12987 solver.cpp:228] Iteration 338000, loss = 1.20074 +I0113 19:56:37.174554 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 19:56:37.174590 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 19:56:37.174616 12987 solver.cpp:244] Train net output #2: prob = 1.23495 (* 1 = 1.23495 loss) +I0113 19:56:37.427999 12987 sgd_solver.cpp:106] Iteration 338000, lr = 0.01 +I0113 20:05:23.868813 12987 solver.cpp:228] Iteration 339000, loss = 1.18462 +I0113 20:05:23.869015 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 20:05:23.869026 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 20:05:23.869038 12987 solver.cpp:244] Train net output #2: prob = 1.33598 (* 1 = 1.33598 loss) +I0113 20:05:24.123564 12987 sgd_solver.cpp:106] Iteration 339000, lr = 0.01 +I0113 20:14:10.512673 12987 solver.cpp:228] Iteration 340000, loss = 1.18887 +I0113 20:14:10.512953 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 20:14:10.512997 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 20:14:10.513025 12987 solver.cpp:244] Train net output #2: prob = 1.77497 (* 1 = 1.77497 loss) +I0113 20:14:10.762217 12987 sgd_solver.cpp:106] Iteration 340000, lr = 0.01 +I0113 20:22:56.448987 12987 solver.cpp:228] Iteration 341000, loss = 1.19774 +I0113 20:22:56.449256 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 20:22:56.449283 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 20:22:56.449296 12987 solver.cpp:244] Train net output #2: prob = 1.40637 (* 1 = 1.40637 loss) +I0113 20:22:56.692337 12987 sgd_solver.cpp:106] Iteration 341000, lr = 0.01 +I0113 20:31:42.389142 12987 solver.cpp:228] Iteration 342000, loss 
= 1.21231 +I0113 20:31:42.389420 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 20:31:42.389449 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 20:31:42.389463 12987 solver.cpp:244] Train net output #2: prob = 0.679288 (* 1 = 0.679288 loss) +I0113 20:31:42.639739 12987 sgd_solver.cpp:106] Iteration 342000, lr = 0.01 +I0113 20:40:28.565279 12987 solver.cpp:228] Iteration 343000, loss = 1.19119 +I0113 20:40:28.565595 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0113 20:40:28.565639 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 20:40:28.565663 12987 solver.cpp:244] Train net output #2: prob = 1.4533 (* 1 = 1.4533 loss) +I0113 20:40:28.813170 12987 sgd_solver.cpp:106] Iteration 343000, lr = 0.01 +I0113 20:49:15.366886 12987 solver.cpp:228] Iteration 344000, loss = 1.20248 +I0113 20:49:15.367105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 20:49:15.367128 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 20:49:15.367141 12987 solver.cpp:244] Train net output #2: prob = 1.32851 (* 1 = 1.32851 loss) +I0113 20:49:15.618392 12987 sgd_solver.cpp:106] Iteration 344000, lr = 0.01 +I0113 20:58:02.077545 12987 solver.cpp:228] Iteration 345000, loss = 1.20203 +I0113 20:58:02.077783 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 20:58:02.077795 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 20:58:02.077808 12987 solver.cpp:244] Train net output #2: prob = 0.966053 (* 1 = 0.966053 loss) +I0113 20:58:02.330416 12987 sgd_solver.cpp:106] Iteration 345000, lr = 0.01 +I0113 21:06:48.236043 12987 solver.cpp:228] Iteration 346000, loss = 1.18831 +I0113 21:06:48.236253 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 21:06:48.236269 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 21:06:48.236289 12987 solver.cpp:244] Train net output #2: prob = 1.0118 (* 1 = 1.0118 loss) +I0113 21:06:48.487489 12987 sgd_solver.cpp:106] Iteration 346000, lr = 0.01 +I0113 21:15:34.538763 12987 solver.cpp:228] Iteration 347000, loss = 1.17656 +I0113 21:15:34.538992 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0113 21:15:34.539017 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 21:15:34.539630 12987 solver.cpp:244] Train net output #2: prob = 1.44663 (* 1 = 1.44663 loss) +I0113 21:15:34.792381 12987 sgd_solver.cpp:106] Iteration 347000, lr = 0.01 +I0113 21:24:21.099125 12987 solver.cpp:228] Iteration 348000, loss = 1.17713 +I0113 21:24:21.099308 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0113 21:24:21.099320 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 21:24:21.099334 12987 solver.cpp:244] Train net output #2: prob = 1.46774 (* 1 = 1.46774 loss) +I0113 21:24:21.353935 12987 sgd_solver.cpp:106] Iteration 348000, lr = 0.01 +I0113 21:33:07.479156 12987 solver.cpp:228] Iteration 349000, loss = 1.18641 +I0113 21:33:07.479387 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5 +I0113 21:33:07.479398 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0113 21:33:07.479409 12987 solver.cpp:244] Train net output #2: prob = 2.00498 (* 1 = 2.00498 loss) +I0113 21:33:07.733928 12987 sgd_solver.cpp:106] Iteration 349000, lr = 0.01 +I0113 21:41:54.091446 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_350000.caffemodel +I0113 
21:41:56.485255 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_350000.solverstate +I0113 21:41:57.023263 12987 solver.cpp:228] Iteration 350000, loss = 1.17623 +I0113 21:41:57.023298 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 21:41:57.023309 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 21:41:57.023319 12987 solver.cpp:244] Train net output #2: prob = 1.41413 (* 1 = 1.41413 loss) +I0113 21:41:57.269925 12987 sgd_solver.cpp:106] Iteration 350000, lr = 0.01 +I0113 21:50:43.266338 12987 solver.cpp:228] Iteration 351000, loss = 1.20045 +I0113 21:50:43.266639 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0113 21:50:43.266680 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 21:50:43.266697 12987 solver.cpp:244] Train net output #2: prob = 1.28779 (* 1 = 1.28779 loss) +I0113 21:50:43.519285 12987 sgd_solver.cpp:106] Iteration 351000, lr = 0.01 +I0113 21:59:29.514155 12987 solver.cpp:228] Iteration 352000, loss = 1.18941 +I0113 21:59:29.514456 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.46875 +I0113 21:59:29.514502 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0113 21:59:29.514529 12987 solver.cpp:244] Train net output #2: prob = 1.38707 (* 1 = 1.38707 loss) +I0113 21:59:29.769847 12987 sgd_solver.cpp:106] Iteration 352000, lr = 0.01 +I0113 22:08:16.171957 12987 solver.cpp:228] Iteration 353000, loss = 1.19434 +I0113 22:08:16.172421 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 22:08:16.172432 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0113 22:08:16.172443 12987 solver.cpp:244] Train net output #2: prob = 1.04121 (* 1 = 1.04121 loss) +I0113 22:08:16.417804 12987 sgd_solver.cpp:106] Iteration 353000, lr = 0.01 +I0113 22:17:06.128707 12987 solver.cpp:228] Iteration 354000, loss = 1.22007 +I0113 22:17:06.128904 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0113 22:17:06.128916 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 22:17:06.128928 12987 solver.cpp:244] Train net output #2: prob = 1.03501 (* 1 = 1.03501 loss) +I0113 22:17:06.379995 12987 sgd_solver.cpp:106] Iteration 354000, lr = 0.01 +I0113 22:25:52.049821 12987 solver.cpp:228] Iteration 355000, loss = 1.20108 +I0113 22:25:52.050113 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 22:25:52.050132 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 22:25:52.050153 12987 solver.cpp:244] Train net output #2: prob = 1.21334 (* 1 = 1.21334 loss) +I0113 22:25:52.305441 12987 sgd_solver.cpp:106] Iteration 355000, lr = 0.01 +I0113 22:34:40.756561 12987 solver.cpp:228] Iteration 356000, loss = 1.1968 +I0113 22:34:40.756767 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 22:34:40.756780 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 22:34:40.756793 12987 solver.cpp:244] Train net output #2: prob = 1.10302 (* 1 = 1.10302 loss) +I0113 22:34:41.019315 12987 sgd_solver.cpp:106] Iteration 356000, lr = 0.01 +I0113 22:43:32.814476 12987 solver.cpp:228] Iteration 357000, loss = 1.18389 +I0113 22:43:32.814731 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 22:43:32.814745 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 22:43:32.814761 12987 solver.cpp:244] Train net output #2: prob = 0.894181 (* 1 = 0.894181 loss) +I0113 
22:43:33.124644 12987 sgd_solver.cpp:106] Iteration 357000, lr = 0.01 +I0113 22:52:20.676394 12987 solver.cpp:228] Iteration 358000, loss = 1.18206 +I0113 22:52:20.676709 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 22:52:20.676735 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 22:52:20.676746 12987 solver.cpp:244] Train net output #2: prob = 1.08495 (* 1 = 1.08495 loss) +I0113 22:52:20.930640 12987 sgd_solver.cpp:106] Iteration 358000, lr = 0.01 +I0113 23:01:06.499433 12987 solver.cpp:228] Iteration 359000, loss = 1.19499 +I0113 23:01:06.499639 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 23:01:06.499650 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 23:01:06.499660 12987 solver.cpp:244] Train net output #2: prob = 0.978576 (* 1 = 0.978576 loss) +I0113 23:01:06.753171 12987 sgd_solver.cpp:106] Iteration 359000, lr = 0.01 +I0113 23:09:52.386081 12987 solver.cpp:228] Iteration 360000, loss = 1.16172 +I0113 23:09:52.386320 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 23:09:52.386350 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 23:09:52.386368 12987 solver.cpp:244] Train net output #2: prob = 1.29528 (* 1 = 1.29528 loss) +I0113 23:09:52.637497 12987 sgd_solver.cpp:106] Iteration 360000, lr = 0.01 +I0113 23:18:38.471146 12987 solver.cpp:228] Iteration 361000, loss = 1.18676 +I0113 23:18:38.471484 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0113 23:18:38.471530 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0113 23:18:38.471549 12987 solver.cpp:244] Train net output #2: prob = 0.690934 (* 1 = 0.690934 loss) +I0113 23:18:38.724118 12987 sgd_solver.cpp:106] Iteration 361000, lr = 0.01 +I0113 23:27:24.760687 12987 solver.cpp:228] Iteration 362000, loss = 1.17688 +I0113 23:27:24.760895 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0113 23:27:24.760906 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 23:27:24.760920 12987 solver.cpp:244] Train net output #2: prob = 1.23414 (* 1 = 1.23414 loss) +I0113 23:27:25.015868 12987 sgd_solver.cpp:106] Iteration 362000, lr = 0.01 +I0113 23:36:10.923382 12987 solver.cpp:228] Iteration 363000, loss = 1.18151 +I0113 23:36:10.923632 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0113 23:36:10.923660 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0113 23:36:10.923676 12987 solver.cpp:244] Train net output #2: prob = 1.35978 (* 1 = 1.35978 loss) +I0113 23:36:11.177376 12987 sgd_solver.cpp:106] Iteration 363000, lr = 0.01 +I0113 23:44:56.876698 12987 solver.cpp:228] Iteration 364000, loss = 1.19253 +I0113 23:44:56.876909 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0113 23:44:56.876919 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0113 23:44:56.876932 12987 solver.cpp:244] Train net output #2: prob = 1.1376 (* 1 = 1.1376 loss) +I0113 23:44:57.130466 12987 sgd_solver.cpp:106] Iteration 364000, lr = 0.01 +I0113 23:53:42.919410 12987 solver.cpp:228] Iteration 365000, loss = 1.18895 +I0113 23:53:42.919628 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0113 23:53:42.919667 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0113 23:53:42.919687 12987 solver.cpp:244] Train net output #2: prob = 0.984967 (* 1 = 0.984967 loss) +I0113 23:53:43.172812 12987 sgd_solver.cpp:106] Iteration 365000, lr = 0.01 +I0114 00:02:28.771911 12987 solver.cpp:228] Iteration 
366000, loss = 1.18172 +I0114 00:02:28.772114 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 00:02:28.772125 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 00:02:28.772136 12987 solver.cpp:244] Train net output #2: prob = 0.588618 (* 1 = 0.588618 loss) +I0114 00:02:29.027328 12987 sgd_solver.cpp:106] Iteration 366000, lr = 0.01 +I0114 00:11:14.980386 12987 solver.cpp:228] Iteration 367000, loss = 1.17724 +I0114 00:11:14.980633 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 00:11:14.980669 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 00:11:14.980695 12987 solver.cpp:244] Train net output #2: prob = 0.892042 (* 1 = 0.892042 loss) +I0114 00:11:15.233026 12987 sgd_solver.cpp:106] Iteration 367000, lr = 0.01 +I0114 00:20:01.080674 12987 solver.cpp:228] Iteration 368000, loss = 1.16846 +I0114 00:20:01.080927 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 00:20:01.080960 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 00:20:01.080979 12987 solver.cpp:244] Train net output #2: prob = 0.902843 (* 1 = 0.902843 loss) +I0114 00:20:01.333240 12987 sgd_solver.cpp:106] Iteration 368000, lr = 0.01 +I0114 00:28:47.099701 12987 solver.cpp:228] Iteration 369000, loss = 1.17062 +I0114 00:28:47.100016 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 00:28:47.100062 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 00:28:47.100095 12987 solver.cpp:244] Train net output #2: prob = 1.54784 (* 1 = 1.54784 loss) +I0114 00:28:47.348915 12987 sgd_solver.cpp:106] Iteration 369000, lr = 0.01 +I0114 00:37:35.473036 12987 solver.cpp:228] Iteration 370000, loss = 1.16515 +I0114 00:37:35.473219 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 00:37:35.473232 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 00:37:35.473247 12987 solver.cpp:244] Train net output #2: prob = 1.04917 (* 1 = 1.04917 loss) +I0114 00:37:35.725646 12987 sgd_solver.cpp:106] Iteration 370000, lr = 0.01 +I0114 00:46:26.943434 12987 solver.cpp:228] Iteration 371000, loss = 1.16598 +I0114 00:46:26.943619 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 00:46:26.943629 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 00:46:26.943640 12987 solver.cpp:244] Train net output #2: prob = 0.97833 (* 1 = 0.97833 loss) +I0114 00:46:27.203161 12987 sgd_solver.cpp:106] Iteration 371000, lr = 0.01 +I0114 00:55:19.947721 12987 solver.cpp:228] Iteration 372000, loss = 1.17441 +I0114 00:55:19.947960 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 00:55:19.947978 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 00:55:19.947991 12987 solver.cpp:244] Train net output #2: prob = 1.18041 (* 1 = 1.18041 loss) +I0114 00:55:20.202209 12987 sgd_solver.cpp:106] Iteration 372000, lr = 0.01 +I0114 01:04:05.769327 12987 solver.cpp:228] Iteration 373000, loss = 1.17959 +I0114 01:04:05.769578 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0114 01:04:05.769618 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.78125 +I0114 01:04:05.769640 12987 solver.cpp:244] Train net output #2: prob = 1.52907 (* 1 = 1.52907 loss) +I0114 01:04:06.021875 12987 sgd_solver.cpp:106] Iteration 373000, lr = 0.01 +I0114 01:12:50.857569 12987 solver.cpp:228] Iteration 374000, loss = 1.17581 +I0114 01:12:50.857787 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0114 
01:12:50.857815 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.75 +I0114 01:12:50.857830 12987 solver.cpp:244] Train net output #2: prob = 1.91238 (* 1 = 1.91238 loss) +I0114 01:12:51.107883 12987 sgd_solver.cpp:106] Iteration 374000, lr = 0.01 +I0114 01:21:37.079597 12987 solver.cpp:228] Iteration 375000, loss = 1.1669 +I0114 01:21:37.079795 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 01:21:37.079818 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0114 01:21:37.079835 12987 solver.cpp:244] Train net output #2: prob = 1.46408 (* 1 = 1.46408 loss) +I0114 01:21:37.328301 12987 sgd_solver.cpp:106] Iteration 375000, lr = 0.01 +I0114 01:30:23.408346 12987 solver.cpp:228] Iteration 376000, loss = 1.18501 +I0114 01:30:23.408648 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 01:30:23.408699 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 01:30:23.408735 12987 solver.cpp:244] Train net output #2: prob = 1.23621 (* 1 = 1.23621 loss) +I0114 01:30:23.662675 12987 sgd_solver.cpp:106] Iteration 376000, lr = 0.01 +I0114 01:39:14.542415 12987 solver.cpp:228] Iteration 377000, loss = 1.16622 +I0114 01:39:14.542588 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 01:39:14.542598 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 01:39:14.542610 12987 solver.cpp:244] Train net output #2: prob = 0.618044 (* 1 = 0.618044 loss) +I0114 01:39:14.832198 12987 sgd_solver.cpp:106] Iteration 377000, lr = 0.01 +I0114 01:48:08.602169 12987 solver.cpp:228] Iteration 378000, loss = 1.17241 +I0114 01:48:08.602362 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 01:48:08.602375 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 01:48:08.602391 12987 solver.cpp:244] Train net output #2: prob = 1.08973 (* 1 = 1.08973 loss) +I0114 01:48:08.856638 12987 sgd_solver.cpp:106] Iteration 378000, lr = 0.01 +I0114 01:57:02.212498 12987 solver.cpp:228] Iteration 379000, loss = 1.15797 +I0114 01:57:02.212699 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 01:57:02.212731 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 01:57:02.212749 12987 solver.cpp:244] Train net output #2: prob = 1.14894 (* 1 = 1.14894 loss) +I0114 01:57:02.466472 12987 sgd_solver.cpp:106] Iteration 379000, lr = 0.01 +I0114 02:05:56.650827 12987 solver.cpp:228] Iteration 380000, loss = 1.16664 +I0114 02:05:56.651064 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 02:05:56.651093 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 02:05:56.651121 12987 solver.cpp:244] Train net output #2: prob = 0.794984 (* 1 = 0.794984 loss) +I0114 02:05:56.927989 12987 sgd_solver.cpp:106] Iteration 380000, lr = 0.01 +I0114 02:14:51.070118 12987 solver.cpp:228] Iteration 381000, loss = 1.16467 +I0114 02:14:51.070308 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 02:14:51.070322 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 02:14:51.070335 12987 solver.cpp:244] Train net output #2: prob = 1.07082 (* 1 = 1.07082 loss) +I0114 02:14:51.339771 12987 sgd_solver.cpp:106] Iteration 381000, lr = 0.01 +I0114 02:23:45.627683 12987 solver.cpp:228] Iteration 382000, loss = 1.18568 +I0114 02:23:45.627949 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 02:23:45.627988 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 02:23:45.628011 12987 
solver.cpp:244] Train net output #2: prob = 0.891065 (* 1 = 0.891065 loss) +I0114 02:23:45.879992 12987 sgd_solver.cpp:106] Iteration 382000, lr = 0.01 +I0114 02:32:31.431298 12987 solver.cpp:228] Iteration 383000, loss = 1.16899 +I0114 02:32:31.431561 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 02:32:31.431594 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 02:32:31.431607 12987 solver.cpp:244] Train net output #2: prob = 0.95022 (* 1 = 0.95022 loss) +I0114 02:32:31.682003 12987 sgd_solver.cpp:106] Iteration 383000, lr = 0.01 +I0114 02:41:17.482736 12987 solver.cpp:228] Iteration 384000, loss = 1.17365 +I0114 02:41:17.482933 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 02:41:17.482942 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 02:41:17.482955 12987 solver.cpp:244] Train net output #2: prob = 1.3483 (* 1 = 1.3483 loss) +I0114 02:41:17.732691 12987 sgd_solver.cpp:106] Iteration 384000, lr = 0.01 +I0114 02:50:03.231555 12987 solver.cpp:228] Iteration 385000, loss = 1.18461 +I0114 02:50:03.231801 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0114 02:50:03.231827 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 02:50:03.231844 12987 solver.cpp:244] Train net output #2: prob = 1.07943 (* 1 = 1.07943 loss) +I0114 02:50:03.485395 12987 sgd_solver.cpp:106] Iteration 385000, lr = 0.01 +I0114 02:58:48.788916 12987 solver.cpp:228] Iteration 386000, loss = 1.17267 +I0114 02:58:48.789115 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 02:58:48.789127 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 02:58:48.789140 12987 solver.cpp:244] Train net output #2: prob = 0.738648 (* 1 = 0.738648 loss) +I0114 02:58:49.044143 12987 sgd_solver.cpp:106] Iteration 386000, lr = 0.01 +I0114 03:07:35.451333 12987 solver.cpp:228] Iteration 387000, loss = 1.15833 +I0114 03:07:35.451627 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 03:07:35.451656 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 03:07:35.451680 12987 solver.cpp:244] Train net output #2: prob = 1.05802 (* 1 = 1.05802 loss) +I0114 03:07:35.705253 12987 sgd_solver.cpp:106] Iteration 387000, lr = 0.01 +I0114 03:16:22.049583 12987 solver.cpp:228] Iteration 388000, loss = 1.15644 +I0114 03:16:22.049760 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0114 03:16:22.049772 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 03:16:22.049784 12987 solver.cpp:244] Train net output #2: prob = 1.18628 (* 1 = 1.18628 loss) +I0114 03:16:22.302914 12987 sgd_solver.cpp:106] Iteration 388000, lr = 0.01 +I0114 03:25:08.428876 12987 solver.cpp:228] Iteration 389000, loss = 1.16504 +I0114 03:25:08.429086 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 03:25:08.429098 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 03:25:08.429112 12987 solver.cpp:244] Train net output #2: prob = 0.978586 (* 1 = 0.978586 loss) +I0114 03:25:08.679755 12987 sgd_solver.cpp:106] Iteration 389000, lr = 0.01 +I0114 03:33:54.782196 12987 solver.cpp:228] Iteration 390000, loss = 1.16143 +I0114 03:33:54.782387 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 03:33:54.782397 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0114 03:33:54.782408 12987 solver.cpp:244] Train net output #2: prob = 1.33183 (* 1 = 1.33183 loss) +I0114 03:33:55.036320 12987 sgd_solver.cpp:106] 
Iteration 390000, lr = 0.01 +I0114 03:42:41.612180 12987 solver.cpp:228] Iteration 391000, loss = 1.17582 +I0114 03:42:41.612375 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 03:42:41.612383 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 03:42:41.612396 12987 solver.cpp:244] Train net output #2: prob = 1.15226 (* 1 = 1.15226 loss) +I0114 03:42:41.865420 12987 sgd_solver.cpp:106] Iteration 391000, lr = 0.01 +I0114 03:51:28.100635 12987 solver.cpp:228] Iteration 392000, loss = 1.17023 +I0114 03:51:28.100821 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 03:51:28.100831 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0114 03:51:28.100845 12987 solver.cpp:244] Train net output #2: prob = 1.66465 (* 1 = 1.66465 loss) +I0114 03:51:28.353438 12987 sgd_solver.cpp:106] Iteration 392000, lr = 0.01 +I0114 04:00:14.703369 12987 solver.cpp:228] Iteration 393000, loss = 1.17552 +I0114 04:00:14.703611 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 04:00:14.703642 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 04:00:14.703662 12987 solver.cpp:244] Train net output #2: prob = 0.630162 (* 1 = 0.630162 loss) +I0114 04:00:14.958369 12987 sgd_solver.cpp:106] Iteration 393000, lr = 0.01 +I0114 04:09:01.167640 12987 solver.cpp:228] Iteration 394000, loss = 1.1975 +I0114 04:09:01.167832 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 04:09:01.167843 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 04:09:01.167855 12987 solver.cpp:244] Train net output #2: prob = 0.911852 (* 1 = 0.911852 loss) +I0114 04:09:01.410310 12987 sgd_solver.cpp:106] Iteration 394000, lr = 0.01 +I0114 04:17:47.720736 12987 solver.cpp:228] Iteration 395000, loss = 1.18525 +I0114 04:17:47.721037 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0114 04:17:47.721076 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 04:17:47.721103 12987 solver.cpp:244] Train net output #2: prob = 1.14506 (* 1 = 1.14506 loss) +I0114 04:17:47.973076 12987 sgd_solver.cpp:106] Iteration 395000, lr = 0.01 +I0114 04:26:34.519573 12987 solver.cpp:228] Iteration 396000, loss = 1.17731 +I0114 04:26:34.519789 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 04:26:34.519800 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 04:26:34.519814 12987 solver.cpp:244] Train net output #2: prob = 0.946154 (* 1 = 0.946154 loss) +I0114 04:26:34.771831 12987 sgd_solver.cpp:106] Iteration 396000, lr = 0.01 +I0114 04:35:21.408164 12987 solver.cpp:228] Iteration 397000, loss = 1.16511 +I0114 04:35:21.408440 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0114 04:35:21.408480 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.84375 +I0114 04:35:21.408506 12987 solver.cpp:244] Train net output #2: prob = 1.60463 (* 1 = 1.60463 loss) +I0114 04:35:21.662286 12987 sgd_solver.cpp:106] Iteration 397000, lr = 0.01 +I0114 04:44:08.197440 12987 solver.cpp:228] Iteration 398000, loss = 1.15681 +I0114 04:44:08.197679 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 04:44:08.197690 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 04:44:08.197700 12987 solver.cpp:244] Train net output #2: prob = 1.06171 (* 1 = 1.06171 loss) +I0114 04:44:08.453649 12987 sgd_solver.cpp:106] Iteration 398000, lr = 0.01 +I0114 04:52:54.665856 12987 solver.cpp:228] Iteration 399000, loss = 1.16453 +I0114 
04:52:54.666055 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 04:52:54.666066 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 04:52:54.666079 12987 solver.cpp:244] Train net output #2: prob = 0.931586 (* 1 = 0.931586 loss) +I0114 04:52:54.917178 12987 sgd_solver.cpp:106] Iteration 399000, lr = 0.01 +I0114 05:01:40.510112 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_400000.caffemodel +I0114 05:01:42.635093 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_400000.solverstate +I0114 05:01:43.043922 12987 solver.cpp:228] Iteration 400000, loss = 1.14885 +I0114 05:01:43.043998 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.5625 +I0114 05:01:43.044009 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.65625 +I0114 05:01:43.044039 12987 solver.cpp:244] Train net output #2: prob = 2.16626 (* 1 = 2.16626 loss) +I0114 05:01:43.290634 12987 sgd_solver.cpp:106] Iteration 400000, lr = 0.001 +I0114 05:10:29.713328 12987 solver.cpp:228] Iteration 401000, loss = 1.05628 +I0114 05:10:29.715931 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 05:10:29.715946 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 05:10:29.715962 12987 solver.cpp:244] Train net output #2: prob = 1.07328 (* 1 = 1.07328 loss) +I0114 05:10:29.969885 12987 sgd_solver.cpp:106] Iteration 401000, lr = 0.001 +I0114 05:19:16.203398 12987 solver.cpp:228] Iteration 402000, loss = 0.964927 +I0114 05:19:16.203660 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 05:19:16.203696 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 05:19:16.203737 12987 solver.cpp:244] Train net output #2: prob = 0.839686 (* 1 = 0.839686 loss) +I0114 05:19:16.443516 12987 sgd_solver.cpp:106] Iteration 402000, lr = 0.001 +I0114 05:28:02.995210 12987 solver.cpp:228] Iteration 403000, loss = 0.935103 +I0114 05:28:02.995476 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 05:28:02.995502 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 05:28:02.995527 12987 solver.cpp:244] Train net output #2: prob = 0.630573 (* 1 = 0.630573 loss) +I0114 05:28:03.246212 12987 sgd_solver.cpp:106] Iteration 403000, lr = 0.001 +I0114 05:36:49.250447 12987 solver.cpp:228] Iteration 404000, loss = 0.925927 +I0114 05:36:49.250748 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 05:36:49.250788 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 05:36:49.250816 12987 solver.cpp:244] Train net output #2: prob = 0.88765 (* 1 = 0.88765 loss) +I0114 05:36:49.505362 12987 sgd_solver.cpp:106] Iteration 404000, lr = 0.001 +I0114 05:45:35.833364 12987 solver.cpp:228] Iteration 405000, loss = 0.898958 +I0114 05:45:35.833570 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 05:45:35.833580 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 05:45:35.833593 12987 solver.cpp:244] Train net output #2: prob = 0.917638 (* 1 = 0.917638 loss) +I0114 05:45:36.085273 12987 sgd_solver.cpp:106] Iteration 405000, lr = 0.001 +I0114 05:54:22.774646 12987 solver.cpp:228] Iteration 406000, loss = 0.874181 +I0114 05:54:22.774876 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 05:54:22.774901 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 
+I0114 05:54:22.774919 12987 solver.cpp:244] Train net output #2: prob = 1.01912 (* 1 = 1.01912 loss) +I0114 05:54:23.028465 12987 sgd_solver.cpp:106] Iteration 406000, lr = 0.001 +I0114 06:03:09.466899 12987 solver.cpp:228] Iteration 407000, loss = 0.829775 +I0114 06:03:09.467125 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 06:03:09.467154 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 06:03:09.467169 12987 solver.cpp:244] Train net output #2: prob = 0.541307 (* 1 = 0.541307 loss) +I0114 06:03:09.721519 12987 sgd_solver.cpp:106] Iteration 407000, lr = 0.001 +I0114 06:11:56.005493 12987 solver.cpp:228] Iteration 408000, loss = 0.801841 +I0114 06:11:56.005756 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 06:11:56.005772 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 06:11:56.005790 12987 solver.cpp:244] Train net output #2: prob = 1.10712 (* 1 = 1.10712 loss) +I0114 06:11:56.259346 12987 sgd_solver.cpp:106] Iteration 408000, lr = 0.001 +I0114 06:20:42.716915 12987 solver.cpp:228] Iteration 409000, loss = 0.753016 +I0114 06:20:42.717146 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 06:20:42.717176 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 06:20:42.717188 12987 solver.cpp:244] Train net output #2: prob = 0.677079 (* 1 = 0.677079 loss) +I0114 06:20:42.971110 12987 sgd_solver.cpp:106] Iteration 409000, lr = 0.001 +I0114 06:29:29.854184 12987 solver.cpp:228] Iteration 410000, loss = 0.709026 +I0114 06:29:29.854457 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 06:29:29.854497 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 06:29:29.854521 12987 solver.cpp:244] Train net output #2: prob = 0.910912 (* 1 = 0.910912 loss) +I0114 06:29:30.105851 12987 sgd_solver.cpp:106] Iteration 410000, lr = 0.001 +I0114 06:38:16.453562 12987 solver.cpp:228] Iteration 411000, loss = 0.868935 +I0114 06:38:16.453780 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 06:38:16.453791 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 06:38:16.453805 12987 solver.cpp:244] Train net output #2: prob = 1.09668 (* 1 = 1.09668 loss) +I0114 06:38:16.705755 12987 sgd_solver.cpp:106] Iteration 411000, lr = 0.001 +I0114 06:47:02.904090 12987 solver.cpp:228] Iteration 412000, loss = 0.8577 +I0114 06:47:02.904412 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 06:47:02.904450 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 06:47:02.904476 12987 solver.cpp:244] Train net output #2: prob = 0.426709 (* 1 = 0.426709 loss) +I0114 06:47:03.157624 12987 sgd_solver.cpp:106] Iteration 412000, lr = 0.001 +I0114 06:55:49.283874 12987 solver.cpp:228] Iteration 413000, loss = 0.857873 +I0114 06:55:49.284162 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 06:55:49.284200 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 06:55:49.284230 12987 solver.cpp:244] Train net output #2: prob = 0.853786 (* 1 = 0.853786 loss) +I0114 06:55:49.538883 12987 sgd_solver.cpp:106] Iteration 413000, lr = 0.001 +I0114 07:04:35.939334 12987 solver.cpp:228] Iteration 414000, loss = 0.842306 +I0114 07:04:35.939625 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 07:04:35.939656 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 07:04:35.939674 12987 solver.cpp:244] Train net output #2: prob = 0.887048 (* 1 = 0.887048 
loss) +I0114 07:04:36.191977 12987 sgd_solver.cpp:106] Iteration 414000, lr = 0.001 +I0114 07:13:22.833729 12987 solver.cpp:228] Iteration 415000, loss = 0.82297 +I0114 07:13:22.834056 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 07:13:22.834079 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 07:13:22.834100 12987 solver.cpp:244] Train net output #2: prob = 0.879075 (* 1 = 0.879075 loss) +I0114 07:13:23.089623 12987 sgd_solver.cpp:106] Iteration 415000, lr = 0.001 +I0114 07:22:09.297396 12987 solver.cpp:228] Iteration 416000, loss = 0.817709 +I0114 07:22:09.297649 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 07:22:09.297684 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 07:22:09.297714 12987 solver.cpp:244] Train net output #2: prob = 0.852745 (* 1 = 0.852745 loss) +I0114 07:22:09.549271 12987 sgd_solver.cpp:106] Iteration 416000, lr = 0.001 +I0114 07:30:55.577524 12987 solver.cpp:228] Iteration 417000, loss = 0.773206 +I0114 07:30:55.577828 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 07:30:55.577850 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 07:30:55.577863 12987 solver.cpp:244] Train net output #2: prob = 0.922448 (* 1 = 0.922448 loss) +I0114 07:30:55.828264 12987 sgd_solver.cpp:106] Iteration 417000, lr = 0.001 +I0114 07:39:42.356508 12987 solver.cpp:228] Iteration 418000, loss = 0.76755 +I0114 07:39:42.356765 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 07:39:42.356807 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 07:39:42.356837 12987 solver.cpp:244] Train net output #2: prob = 0.624385 (* 1 = 0.624385 loss) +I0114 07:39:42.609448 12987 sgd_solver.cpp:106] Iteration 418000, lr = 0.001 +I0114 07:48:28.483599 12987 solver.cpp:228] Iteration 419000, loss = 0.733555 +I0114 07:48:28.483819 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 07:48:28.483849 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 07:48:28.483862 12987 solver.cpp:244] Train net output #2: prob = 0.883067 (* 1 = 0.883067 loss) +I0114 07:48:28.735972 12987 sgd_solver.cpp:106] Iteration 419000, lr = 0.001 +I0114 07:57:14.544795 12987 solver.cpp:228] Iteration 420000, loss = 0.70002 +I0114 07:57:14.545045 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 07:57:14.545085 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 07:57:14.545109 12987 solver.cpp:244] Train net output #2: prob = 0.646571 (* 1 = 0.646571 loss) +I0114 07:57:14.790711 12987 sgd_solver.cpp:106] Iteration 420000, lr = 0.001 +I0114 08:06:00.739838 12987 solver.cpp:228] Iteration 421000, loss = 0.838727 +I0114 08:06:00.740105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 08:06:00.740147 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 08:06:00.740169 12987 solver.cpp:244] Train net output #2: prob = 1.21678 (* 1 = 1.21678 loss) +I0114 08:06:00.990875 12987 sgd_solver.cpp:106] Iteration 421000, lr = 0.001 +I0114 08:14:46.468088 12987 solver.cpp:228] Iteration 422000, loss = 0.842823 +I0114 08:14:46.468338 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 08:14:46.468370 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 08:14:46.468391 12987 solver.cpp:244] Train net output #2: prob = 0.849192 (* 1 = 0.849192 loss) +I0114 08:14:46.721366 12987 sgd_solver.cpp:106] Iteration 422000, lr = 0.001 +I0114 
08:23:32.411362 12987 solver.cpp:228] Iteration 423000, loss = 0.815792 +I0114 08:23:32.411635 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 08:23:32.411666 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 08:23:32.411695 12987 solver.cpp:244] Train net output #2: prob = 0.83135 (* 1 = 0.83135 loss) +I0114 08:23:32.665570 12987 sgd_solver.cpp:106] Iteration 423000, lr = 0.001 +I0114 08:32:18.739603 12987 solver.cpp:228] Iteration 424000, loss = 0.80743 +I0114 08:32:18.739858 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 08:32:18.739908 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 08:32:18.739943 12987 solver.cpp:244] Train net output #2: prob = 0.489086 (* 1 = 0.489086 loss) +I0114 08:32:18.992147 12987 sgd_solver.cpp:106] Iteration 424000, lr = 0.001 +I0114 08:41:04.872531 12987 solver.cpp:228] Iteration 425000, loss = 0.797116 +I0114 08:41:04.872747 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 08:41:04.872757 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 08:41:04.872768 12987 solver.cpp:244] Train net output #2: prob = 0.800787 (* 1 = 0.800787 loss) +I0114 08:41:05.127607 12987 sgd_solver.cpp:106] Iteration 425000, lr = 0.001 +I0114 08:49:50.994619 12987 solver.cpp:228] Iteration 426000, loss = 0.780234 +I0114 08:49:50.994793 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0114 08:49:50.994804 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 08:49:50.994815 12987 solver.cpp:244] Train net output #2: prob = 0.422875 (* 1 = 0.422875 loss) +I0114 08:49:51.245429 12987 sgd_solver.cpp:106] Iteration 426000, lr = 0.001 +I0114 08:58:36.439558 12987 solver.cpp:228] Iteration 427000, loss = 0.747275 +I0114 08:58:36.439806 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 08:58:36.439837 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 08:58:36.439853 12987 solver.cpp:244] Train net output #2: prob = 1.08301 (* 1 = 1.08301 loss) +I0114 08:58:36.691577 12987 sgd_solver.cpp:106] Iteration 427000, lr = 0.001 +I0114 09:07:22.429692 12987 solver.cpp:228] Iteration 428000, loss = 0.734865 +I0114 09:07:22.429991 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 09:07:22.430033 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 09:07:22.430059 12987 solver.cpp:244] Train net output #2: prob = 0.798102 (* 1 = 0.798102 loss) +I0114 09:07:22.681185 12987 sgd_solver.cpp:106] Iteration 428000, lr = 0.001 +I0114 09:16:08.622839 12987 solver.cpp:228] Iteration 429000, loss = 0.717377 +I0114 09:16:08.623114 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 09:16:08.623160 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 09:16:08.623188 12987 solver.cpp:244] Train net output #2: prob = 0.772211 (* 1 = 0.772211 loss) +I0114 09:16:08.875828 12987 sgd_solver.cpp:106] Iteration 429000, lr = 0.001 +I0114 09:24:54.712455 12987 solver.cpp:228] Iteration 430000, loss = 0.684794 +I0114 09:24:54.712666 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0114 09:24:54.712678 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 09:24:54.712692 12987 solver.cpp:244] Train net output #2: prob = 0.344038 (* 1 = 0.344038 loss) +I0114 09:24:54.957279 12987 sgd_solver.cpp:106] Iteration 430000, lr = 0.001 +I0114 09:33:40.850940 12987 solver.cpp:228] Iteration 431000, loss = 0.809826 +I0114 09:33:40.851202 12987 
solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0114 09:33:40.851212 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 09:33:40.851224 12987 solver.cpp:244] Train net output #2: prob = 0.904509 (* 1 = 0.904509 loss) +I0114 09:33:41.105535 12987 sgd_solver.cpp:106] Iteration 431000, lr = 0.001 +I0114 09:42:27.240416 12987 solver.cpp:228] Iteration 432000, loss = 0.800736 +I0114 09:42:27.242177 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 09:42:27.242207 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 09:42:27.242220 12987 solver.cpp:244] Train net output #2: prob = 0.762531 (* 1 = 0.762531 loss) +I0114 09:42:27.491514 12987 sgd_solver.cpp:106] Iteration 432000, lr = 0.001 +I0114 09:51:13.538944 12987 solver.cpp:228] Iteration 433000, loss = 0.797589 +I0114 09:51:13.539191 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 09:51:13.539227 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 09:51:13.539260 12987 solver.cpp:244] Train net output #2: prob = 0.766171 (* 1 = 0.766171 loss) +I0114 09:51:13.787899 12987 sgd_solver.cpp:106] Iteration 433000, lr = 0.001 +I0114 09:59:59.898902 12987 solver.cpp:228] Iteration 434000, loss = 0.813098 +I0114 09:59:59.899200 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 09:59:59.899237 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 09:59:59.899258 12987 solver.cpp:244] Train net output #2: prob = 1.35061 (* 1 = 1.35061 loss) +I0114 10:00:00.150419 12987 sgd_solver.cpp:106] Iteration 434000, lr = 0.001 +I0114 10:08:46.056730 12987 solver.cpp:228] Iteration 435000, loss = 0.78426 +I0114 10:08:46.058825 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 10:08:46.058835 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 10:08:46.058848 12987 solver.cpp:244] Train net output #2: prob = 0.956058 (* 1 = 0.956058 loss) +I0114 10:08:46.307579 12987 sgd_solver.cpp:106] Iteration 435000, lr = 0.001 +I0114 10:17:32.050691 12987 solver.cpp:228] Iteration 436000, loss = 0.750922 +I0114 10:17:32.050930 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 10:17:32.050941 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 10:17:32.050953 12987 solver.cpp:244] Train net output #2: prob = 0.95633 (* 1 = 0.95633 loss) +I0114 10:17:32.305461 12987 sgd_solver.cpp:106] Iteration 436000, lr = 0.001 +I0114 10:26:17.996501 12987 solver.cpp:228] Iteration 437000, loss = 0.731242 +I0114 10:26:17.996737 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 10:26:17.996765 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 10:26:17.996783 12987 solver.cpp:244] Train net output #2: prob = 0.545444 (* 1 = 0.545444 loss) +I0114 10:26:18.251062 12987 sgd_solver.cpp:106] Iteration 437000, lr = 0.001 +I0114 10:35:04.925686 12987 solver.cpp:228] Iteration 438000, loss = 0.715997 +I0114 10:35:04.925954 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 10:35:04.925968 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 10:35:04.925987 12987 solver.cpp:244] Train net output #2: prob = 0.464845 (* 1 = 0.464845 loss) +I0114 10:35:05.177880 12987 sgd_solver.cpp:106] Iteration 438000, lr = 0.001 +I0114 10:43:53.497501 12987 solver.cpp:228] Iteration 439000, loss = 0.715413 +I0114 10:43:53.497741 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 10:43:53.497787 12987 
solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 10:43:53.497819 12987 solver.cpp:244] Train net output #2: prob = 0.772465 (* 1 = 0.772465 loss) +I0114 10:43:53.752279 12987 sgd_solver.cpp:106] Iteration 439000, lr = 0.001 +I0114 10:52:47.515126 12987 solver.cpp:228] Iteration 440000, loss = 0.666961 +I0114 10:52:47.515348 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 10:52:47.515363 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 10:52:47.515379 12987 solver.cpp:244] Train net output #2: prob = 0.808721 (* 1 = 0.808721 loss) +I0114 10:52:47.767848 12987 sgd_solver.cpp:106] Iteration 440000, lr = 0.001 +I0114 11:01:41.843818 12987 solver.cpp:228] Iteration 441000, loss = 0.781472 +I0114 11:01:41.843971 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 11:01:41.843981 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 11:01:41.843991 12987 solver.cpp:244] Train net output #2: prob = 0.675806 (* 1 = 0.675806 loss) +I0114 11:01:42.126623 12987 sgd_solver.cpp:106] Iteration 441000, lr = 0.001 +I0114 11:10:36.060663 12987 solver.cpp:228] Iteration 442000, loss = 0.770034 +I0114 11:10:36.060895 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 11:10:36.060926 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 11:10:36.060941 12987 solver.cpp:244] Train net output #2: prob = 0.369742 (* 1 = 0.369742 loss) +I0114 11:10:36.321563 12987 sgd_solver.cpp:106] Iteration 442000, lr = 0.001 +I0114 11:19:31.547415 12987 solver.cpp:228] Iteration 443000, loss = 0.757295 +I0114 11:19:31.547623 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 11:19:31.547636 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 11:19:31.547652 12987 solver.cpp:244] Train net output #2: prob = 0.870894 (* 1 = 0.870894 loss) +I0114 11:19:31.800760 12987 sgd_solver.cpp:106] Iteration 443000, lr = 0.001 +I0114 11:28:26.914476 12987 solver.cpp:228] Iteration 444000, loss = 0.756636 +I0114 11:28:26.914700 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 11:28:26.914728 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 11:28:26.914757 12987 solver.cpp:244] Train net output #2: prob = 0.971323 (* 1 = 0.971323 loss) +I0114 11:28:27.169760 12987 sgd_solver.cpp:106] Iteration 444000, lr = 0.001 +I0114 11:37:21.722147 12987 solver.cpp:228] Iteration 445000, loss = 0.745899 +I0114 11:37:21.722324 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 11:37:21.722333 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 11:37:21.722345 12987 solver.cpp:244] Train net output #2: prob = 0.858952 (* 1 = 0.858952 loss) +I0114 11:37:21.967083 12987 sgd_solver.cpp:106] Iteration 445000, lr = 0.001 +I0114 11:46:16.427876 12987 solver.cpp:228] Iteration 446000, loss = 0.737171 +I0114 11:46:16.428045 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 11:46:16.428053 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 11:46:16.428063 12987 solver.cpp:244] Train net output #2: prob = 0.609358 (* 1 = 0.609358 loss) +I0114 11:46:16.681427 12987 sgd_solver.cpp:106] Iteration 446000, lr = 0.001 +I0114 11:55:11.372936 12987 solver.cpp:228] Iteration 447000, loss = 0.713052 +I0114 11:55:11.373150 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 11:55:11.373165 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 11:55:11.373183 12987 
solver.cpp:244] Train net output #2: prob = 0.724798 (* 1 = 0.724798 loss) +I0114 11:55:11.622064 12987 sgd_solver.cpp:106] Iteration 447000, lr = 0.001 +I0114 12:04:05.346087 12987 solver.cpp:228] Iteration 448000, loss = 0.70169 +I0114 12:04:05.346333 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 12:04:05.346366 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0114 12:04:05.346406 12987 solver.cpp:244] Train net output #2: prob = 1.11924 (* 1 = 1.11924 loss) +I0114 12:04:05.601599 12987 sgd_solver.cpp:106] Iteration 448000, lr = 0.001 +I0114 12:12:59.425521 12987 solver.cpp:228] Iteration 449000, loss = 0.68535 +I0114 12:12:59.425767 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 12:12:59.425797 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 12:12:59.425829 12987 solver.cpp:244] Train net output #2: prob = 0.581704 (* 1 = 0.581704 loss) +I0114 12:12:59.675134 12987 sgd_solver.cpp:106] Iteration 449000, lr = 0.001 +I0114 12:21:54.298655 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_450000.caffemodel +I0114 12:21:58.245617 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_450000.solverstate +I0114 12:21:58.769225 12987 solver.cpp:228] Iteration 450000, loss = 0.664604 +I0114 12:21:58.769330 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 12:21:58.769364 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 12:21:58.769398 12987 solver.cpp:244] Train net output #2: prob = 0.725347 (* 1 = 0.725347 loss) +I0114 12:21:59.020678 12987 sgd_solver.cpp:106] Iteration 450000, lr = 0.001 +I0114 12:30:53.423056 12987 solver.cpp:228] Iteration 451000, loss = 0.749828 +I0114 12:30:53.423297 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 12:30:53.423316 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 12:30:53.423328 12987 solver.cpp:244] Train net output #2: prob = 1.07002 (* 1 = 1.07002 loss) +I0114 12:30:53.690531 12987 sgd_solver.cpp:106] Iteration 451000, lr = 0.001 +I0114 12:39:48.356386 12987 solver.cpp:228] Iteration 452000, loss = 0.749746 +I0114 12:39:48.356552 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 12:39:48.356564 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 12:39:48.356575 12987 solver.cpp:244] Train net output #2: prob = 0.623562 (* 1 = 0.623562 loss) +I0114 12:39:48.609205 12987 sgd_solver.cpp:106] Iteration 452000, lr = 0.001 +I0114 12:48:42.144919 12987 solver.cpp:228] Iteration 453000, loss = 0.742174 +I0114 12:48:42.145153 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 12:48:42.145172 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 12:48:42.145189 12987 solver.cpp:244] Train net output #2: prob = 0.382561 (* 1 = 0.382561 loss) +I0114 12:48:42.396222 12987 sgd_solver.cpp:106] Iteration 453000, lr = 0.001 +I0114 12:57:36.697098 12987 solver.cpp:228] Iteration 454000, loss = 0.73831 +I0114 12:57:36.697343 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 12:57:36.697371 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 12:57:36.697397 12987 solver.cpp:244] Train net output #2: prob = 0.573364 (* 1 = 0.573364 loss) +I0114 12:57:36.950718 12987 sgd_solver.cpp:106] Iteration 454000, lr = 0.001 +I0114 
13:06:31.727082 12987 solver.cpp:228] Iteration 455000, loss = 0.720701 +I0114 13:06:31.727284 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 13:06:31.727296 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 13:06:31.727313 12987 solver.cpp:244] Train net output #2: prob = 0.740667 (* 1 = 0.740667 loss) +I0114 13:06:31.990578 12987 sgd_solver.cpp:106] Iteration 455000, lr = 0.001 +I0114 13:15:26.848918 12987 solver.cpp:228] Iteration 456000, loss = 0.721574 +I0114 13:15:26.849078 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 13:15:26.849087 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 13:15:26.849098 12987 solver.cpp:244] Train net output #2: prob = 0.625159 (* 1 = 0.625159 loss) +I0114 13:15:27.102601 12987 sgd_solver.cpp:106] Iteration 456000, lr = 0.001 +I0114 13:24:18.220366 12987 solver.cpp:228] Iteration 457000, loss = 0.69736 +I0114 13:24:18.220613 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 13:24:18.220651 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 13:24:18.220669 12987 solver.cpp:244] Train net output #2: prob = 0.610874 (* 1 = 0.610874 loss) +I0114 13:24:18.473613 12987 sgd_solver.cpp:106] Iteration 457000, lr = 0.001 +I0114 13:33:04.755270 12987 solver.cpp:228] Iteration 458000, loss = 0.686286 +I0114 13:33:04.755539 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 13:33:04.755550 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 13:33:04.755563 12987 solver.cpp:244] Train net output #2: prob = 0.626799 (* 1 = 0.626799 loss) +I0114 13:33:05.006978 12987 sgd_solver.cpp:106] Iteration 458000, lr = 0.001 +I0114 13:41:51.394596 12987 solver.cpp:228] Iteration 459000, loss = 0.663138 +I0114 13:41:51.394831 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 13:41:51.394857 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 13:41:51.394875 12987 solver.cpp:244] Train net output #2: prob = 0.783001 (* 1 = 0.783001 loss) +I0114 13:41:51.645858 12987 sgd_solver.cpp:106] Iteration 459000, lr = 0.001 +I0114 13:50:37.710583 12987 solver.cpp:228] Iteration 460000, loss = 0.658898 +I0114 13:50:37.710803 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 13:50:37.710829 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 13:50:37.710842 12987 solver.cpp:244] Train net output #2: prob = 0.643045 (* 1 = 0.643045 loss) +I0114 13:50:37.959204 12987 sgd_solver.cpp:106] Iteration 460000, lr = 0.001 +I0114 13:59:24.288527 12987 solver.cpp:228] Iteration 461000, loss = 0.736629 +I0114 13:59:24.288728 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 13:59:24.288739 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 13:59:24.288753 12987 solver.cpp:244] Train net output #2: prob = 0.49774 (* 1 = 0.49774 loss) +I0114 13:59:24.543455 12987 sgd_solver.cpp:106] Iteration 461000, lr = 0.001 +I0114 14:08:10.672380 12987 solver.cpp:228] Iteration 462000, loss = 0.742683 +I0114 14:08:10.672600 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 14:08:10.672610 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 14:08:10.672621 12987 solver.cpp:244] Train net output #2: prob = 0.872332 (* 1 = 0.872332 loss) +I0114 14:08:10.927734 12987 sgd_solver.cpp:106] Iteration 462000, lr = 0.001 +I0114 14:16:59.441200 12987 solver.cpp:228] Iteration 463000, loss = 0.722353 +I0114 14:16:59.441465 
12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 14:16:59.441499 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 14:16:59.441537 12987 solver.cpp:244] Train net output #2: prob = 0.754075 (* 1 = 0.754075 loss) +I0114 14:16:59.694249 12987 sgd_solver.cpp:106] Iteration 463000, lr = 0.001 +I0114 14:25:53.137409 12987 solver.cpp:228] Iteration 464000, loss = 0.720535 +I0114 14:25:53.137624 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 14:25:53.137639 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 14:25:53.137653 12987 solver.cpp:244] Train net output #2: prob = 0.767441 (* 1 = 0.767441 loss) +I0114 14:25:53.390959 12987 sgd_solver.cpp:106] Iteration 464000, lr = 0.001 +I0114 14:34:47.532955 12987 solver.cpp:228] Iteration 465000, loss = 0.713738 +I0114 14:34:47.533177 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0114 14:34:47.533192 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 14:34:47.533210 12987 solver.cpp:244] Train net output #2: prob = 0.978676 (* 1 = 0.978676 loss) +I0114 14:34:47.786712 12987 sgd_solver.cpp:106] Iteration 465000, lr = 0.001 +I0114 14:43:41.050395 12987 solver.cpp:228] Iteration 466000, loss = 0.700162 +I0114 14:43:41.050561 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 14:43:41.050571 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 14:43:41.050581 12987 solver.cpp:244] Train net output #2: prob = 0.865843 (* 1 = 0.865843 loss) +I0114 14:43:41.312430 12987 sgd_solver.cpp:106] Iteration 466000, lr = 0.001 +I0114 14:52:34.931632 12987 solver.cpp:228] Iteration 467000, loss = 0.671042 +I0114 14:52:34.931890 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 14:52:34.931920 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 14:52:34.931946 12987 solver.cpp:244] Train net output #2: prob = 0.376 (* 1 = 0.376 loss) +I0114 14:52:35.186215 12987 sgd_solver.cpp:106] Iteration 467000, lr = 0.001 +I0114 15:01:36.366295 12987 solver.cpp:228] Iteration 468000, loss = 0.672508 +I0114 15:01:36.366518 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 15:01:36.366552 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 15:01:36.366571 12987 solver.cpp:244] Train net output #2: prob = 0.82189 (* 1 = 0.82189 loss) +I0114 15:01:36.625989 12987 sgd_solver.cpp:106] Iteration 468000, lr = 0.001 +I0114 15:10:27.908221 12987 solver.cpp:228] Iteration 469000, loss = 0.662506 +I0114 15:10:27.908505 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 15:10:27.908548 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 15:10:27.908568 12987 solver.cpp:244] Train net output #2: prob = 0.536949 (* 1 = 0.536949 loss) +I0114 15:10:28.161634 12987 sgd_solver.cpp:106] Iteration 469000, lr = 0.001 +I0114 15:19:14.915424 12987 solver.cpp:228] Iteration 470000, loss = 0.636535 +I0114 15:19:14.915632 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 15:19:14.915647 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 15:19:14.915663 12987 solver.cpp:244] Train net output #2: prob = 0.503339 (* 1 = 0.503339 loss) +I0114 15:19:15.170642 12987 sgd_solver.cpp:106] Iteration 470000, lr = 0.001 +I0114 15:28:02.275916 12987 solver.cpp:228] Iteration 471000, loss = 0.72215 +I0114 15:28:02.276170 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 15:28:02.276204 12987 
solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 15:28:02.276219 12987 solver.cpp:244] Train net output #2: prob = 0.53749 (* 1 = 0.53749 loss) +I0114 15:28:02.530802 12987 sgd_solver.cpp:106] Iteration 471000, lr = 0.001 +I0114 15:36:49.510835 12987 solver.cpp:228] Iteration 472000, loss = 0.724129 +I0114 15:36:49.511081 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 15:36:49.511123 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 15:36:49.511147 12987 solver.cpp:244] Train net output #2: prob = 0.841706 (* 1 = 0.841706 loss) +I0114 15:36:49.755362 12987 sgd_solver.cpp:106] Iteration 472000, lr = 0.001 +I0114 15:45:37.542450 12987 solver.cpp:228] Iteration 473000, loss = 0.717999 +I0114 15:45:37.542680 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 15:45:37.542707 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 15:45:37.542729 12987 solver.cpp:244] Train net output #2: prob = 0.515587 (* 1 = 0.515587 loss) +I0114 15:45:37.797615 12987 sgd_solver.cpp:106] Iteration 473000, lr = 0.001 +I0114 15:54:26.452774 12987 solver.cpp:228] Iteration 474000, loss = 0.728299 +I0114 15:54:26.452967 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 15:54:26.452980 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 15:54:26.452993 12987 solver.cpp:244] Train net output #2: prob = 0.355786 (* 1 = 0.355786 loss) +I0114 15:54:26.704141 12987 sgd_solver.cpp:106] Iteration 474000, lr = 0.001 +I0114 16:03:18.451378 12987 solver.cpp:228] Iteration 475000, loss = 0.697906 +I0114 16:03:18.451566 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 16:03:18.451578 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 16:03:18.451594 12987 solver.cpp:244] Train net output #2: prob = 0.630658 (* 1 = 0.630658 loss) +I0114 16:03:18.705819 12987 sgd_solver.cpp:106] Iteration 475000, lr = 0.001 +I0114 16:12:13.061990 12987 solver.cpp:228] Iteration 476000, loss = 0.688747 +I0114 16:12:13.062152 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0114 16:12:13.062161 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 16:12:13.062175 12987 solver.cpp:244] Train net output #2: prob = 0.327111 (* 1 = 0.327111 loss) +I0114 16:12:13.316650 12987 sgd_solver.cpp:106] Iteration 476000, lr = 0.001 +I0114 16:21:05.909489 12987 solver.cpp:228] Iteration 477000, loss = 0.67046 +I0114 16:21:05.909728 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 16:21:05.909754 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 16:21:05.909785 12987 solver.cpp:244] Train net output #2: prob = 0.559527 (* 1 = 0.559527 loss) +I0114 16:21:06.163256 12987 sgd_solver.cpp:106] Iteration 477000, lr = 0.001 +I0114 16:30:00.397693 12987 solver.cpp:228] Iteration 478000, loss = 0.660646 +I0114 16:30:00.397897 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 16:30:00.397912 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 16:30:00.397923 12987 solver.cpp:244] Train net output #2: prob = 0.411168 (* 1 = 0.411168 loss) +I0114 16:30:00.687209 12987 sgd_solver.cpp:106] Iteration 478000, lr = 0.001 +I0114 16:38:53.445348 12987 solver.cpp:228] Iteration 479000, loss = 0.652486 +I0114 16:38:53.445540 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0114 16:38:53.445552 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 16:38:53.445569 12987 solver.cpp:244] 
Train net output #2: prob = 0.283558 (* 1 = 0.283558 loss) +I0114 16:38:53.702425 12987 sgd_solver.cpp:106] Iteration 479000, lr = 0.001 +I0114 16:47:47.553453 12987 solver.cpp:228] Iteration 480000, loss = 0.6205 +I0114 16:47:47.553643 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 16:47:47.553653 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 16:47:47.553663 12987 solver.cpp:244] Train net output #2: prob = 0.574065 (* 1 = 0.574065 loss) +I0114 16:47:47.797338 12987 sgd_solver.cpp:106] Iteration 480000, lr = 0.001 +I0114 16:56:41.683910 12987 solver.cpp:228] Iteration 481000, loss = 0.71777 +I0114 16:56:41.684080 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 16:56:41.684092 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 16:56:41.684108 12987 solver.cpp:244] Train net output #2: prob = 0.645326 (* 1 = 0.645326 loss) +I0114 16:56:41.940769 12987 sgd_solver.cpp:106] Iteration 481000, lr = 0.001 +I0114 17:05:33.912850 12987 solver.cpp:228] Iteration 482000, loss = 0.70034 +I0114 17:05:33.913187 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 17:05:33.913231 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 17:05:33.913256 12987 solver.cpp:244] Train net output #2: prob = 0.407978 (* 1 = 0.407978 loss) +I0114 17:05:34.166816 12987 sgd_solver.cpp:106] Iteration 482000, lr = 0.001 +I0114 17:14:22.849360 12987 solver.cpp:228] Iteration 483000, loss = 0.690888 +I0114 17:14:22.849527 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 17:14:22.849537 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 17:14:22.849548 12987 solver.cpp:244] Train net output #2: prob = 0.773648 (* 1 = 0.773648 loss) +I0114 17:14:23.113481 12987 sgd_solver.cpp:106] Iteration 483000, lr = 0.001 +I0114 17:23:17.983553 12987 solver.cpp:228] Iteration 484000, loss = 0.698915 +I0114 17:23:17.983767 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 17:23:17.983783 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 17:23:17.983799 12987 solver.cpp:244] Train net output #2: prob = 0.54237 (* 1 = 0.54237 loss) +I0114 17:23:18.245126 12987 sgd_solver.cpp:106] Iteration 484000, lr = 0.001 +I0114 17:32:12.545811 12987 solver.cpp:228] Iteration 485000, loss = 0.691232 +I0114 17:32:12.546030 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0114 17:32:12.546056 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 17:32:12.546073 12987 solver.cpp:244] Train net output #2: prob = 0.280981 (* 1 = 0.280981 loss) +I0114 17:32:12.794062 12987 sgd_solver.cpp:106] Iteration 485000, lr = 0.001 +I0114 17:41:05.876240 12987 solver.cpp:228] Iteration 486000, loss = 0.690659 +I0114 17:41:05.876441 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 17:41:05.876454 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 17:41:05.876467 12987 solver.cpp:244] Train net output #2: prob = 0.773973 (* 1 = 0.773973 loss) +I0114 17:41:06.129462 12987 sgd_solver.cpp:106] Iteration 486000, lr = 0.001 +I0114 17:50:00.811509 12987 solver.cpp:228] Iteration 487000, loss = 0.659332 +I0114 17:50:00.811691 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 17:50:00.811704 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 17:50:00.811717 12987 solver.cpp:244] Train net output #2: prob = 0.673431 (* 1 = 0.673431 loss) +I0114 17:50:01.066645 12987 sgd_solver.cpp:106] 
Iteration 487000, lr = 0.001 +I0114 17:58:54.535269 12987 solver.cpp:228] Iteration 488000, loss = 0.648822 +I0114 17:58:54.535491 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 17:58:54.535506 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 17:58:54.535524 12987 solver.cpp:244] Train net output #2: prob = 0.55234 (* 1 = 0.55234 loss) +I0114 17:58:54.788597 12987 sgd_solver.cpp:106] Iteration 488000, lr = 0.001 +I0114 18:07:49.786638 12987 solver.cpp:228] Iteration 489000, loss = 0.6314 +I0114 18:07:49.786825 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 18:07:49.786837 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 18:07:49.786850 12987 solver.cpp:244] Train net output #2: prob = 0.725362 (* 1 = 0.725362 loss) +I0114 18:07:50.043846 12987 sgd_solver.cpp:106] Iteration 489000, lr = 0.001 +I0114 18:16:43.314728 12987 solver.cpp:228] Iteration 490000, loss = 0.62298 +I0114 18:16:43.315284 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 18:16:43.315372 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 18:16:43.315441 12987 solver.cpp:244] Train net output #2: prob = 0.623765 (* 1 = 0.623765 loss) +I0114 18:16:43.573549 12987 sgd_solver.cpp:106] Iteration 490000, lr = 0.001 +I0114 18:25:37.941812 12987 solver.cpp:228] Iteration 491000, loss = 0.68875 +I0114 18:25:37.941989 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 18:25:37.941998 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 18:25:37.942009 12987 solver.cpp:244] Train net output #2: prob = 0.688046 (* 1 = 0.688046 loss) +I0114 18:25:38.210782 12987 sgd_solver.cpp:106] Iteration 491000, lr = 0.001 +I0114 18:34:30.645566 12987 solver.cpp:228] Iteration 492000, loss = 0.69477 +I0114 18:34:30.645762 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 18:34:30.645776 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 18:34:30.645792 12987 solver.cpp:244] Train net output #2: prob = 0.590649 (* 1 = 0.590649 loss) +I0114 18:34:30.899605 12987 sgd_solver.cpp:106] Iteration 492000, lr = 0.001 +I0114 18:43:24.385514 12987 solver.cpp:228] Iteration 493000, loss = 0.678866 +I0114 18:43:24.385761 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.59375 +I0114 18:43:24.385789 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.8125 +I0114 18:43:24.385819 12987 solver.cpp:244] Train net output #2: prob = 1.51726 (* 1 = 1.51726 loss) +I0114 18:43:24.635388 12987 sgd_solver.cpp:106] Iteration 493000, lr = 0.001 +I0114 18:52:17.139844 12987 solver.cpp:228] Iteration 494000, loss = 0.681992 +I0114 18:52:17.140069 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 18:52:17.140094 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 18:52:17.140120 12987 solver.cpp:244] Train net output #2: prob = 0.773085 (* 1 = 0.773085 loss) +I0114 18:52:17.389237 12987 sgd_solver.cpp:106] Iteration 494000, lr = 0.001 +I0114 19:01:13.192263 12987 solver.cpp:228] Iteration 495000, loss = 0.67055 +I0114 19:01:13.192461 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 19:01:13.192473 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 19:01:13.192487 12987 solver.cpp:244] Train net output #2: prob = 0.376479 (* 1 = 0.376479 loss) +I0114 19:01:13.444089 12987 sgd_solver.cpp:106] Iteration 495000, lr = 0.001 +I0114 19:10:07.245393 12987 solver.cpp:228] Iteration 496000, loss = 0.667127 
+I0114 19:10:07.245610 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 19:10:07.245638 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 19:10:07.245656 12987 solver.cpp:244] Train net output #2: prob = 0.451863 (* 1 = 0.451863 loss) +I0114 19:10:07.499857 12987 sgd_solver.cpp:106] Iteration 496000, lr = 0.001 +I0114 19:18:39.223518 13000 blocking_queue.cpp:50] Waiting for data +I0114 19:19:05.945442 13010 blocking_queue.cpp:50] Waiting for data +I0114 19:19:06.441524 12987 solver.cpp:228] Iteration 497000, loss = 0.649223 +I0114 19:19:06.441573 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 19:19:06.441582 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 19:19:06.441594 12987 solver.cpp:244] Train net output #2: prob = 0.855879 (* 1 = 0.855879 loss) +I0114 19:19:06.693609 12987 sgd_solver.cpp:106] Iteration 497000, lr = 0.001 +I0114 19:27:37.598986 13000 blocking_queue.cpp:50] Waiting for data +I0114 19:28:02.772050 12987 solver.cpp:228] Iteration 498000, loss = 0.651096 +I0114 19:28:02.772089 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 19:28:02.772095 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 19:28:02.772104 12987 solver.cpp:244] Train net output #2: prob = 0.386741 (* 1 = 0.386741 loss) +I0114 19:28:03.027655 12987 sgd_solver.cpp:106] Iteration 498000, lr = 0.001 +I0114 19:33:33.447197 13003 blocking_queue.cpp:50] Waiting for data +I0114 19:36:57.127378 12987 solver.cpp:228] Iteration 499000, loss = 0.622052 +I0114 19:36:57.127562 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 19:36:57.127570 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 19:36:57.127580 12987 solver.cpp:244] Train net output #2: prob = 0.616567 (* 1 = 0.616567 loss) +I0114 19:36:57.377696 12987 sgd_solver.cpp:106] Iteration 499000, lr = 0.001 +I0114 19:45:51.646553 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_500000.caffemodel +I0114 19:45:59.535962 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_500000.solverstate +I0114 19:46:00.092412 12987 solver.cpp:228] Iteration 500000, loss = 0.630675 +I0114 19:46:00.092449 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 19:46:00.092458 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 19:46:00.092469 12987 solver.cpp:244] Train net output #2: prob = 0.618624 (* 1 = 0.618624 loss) +I0114 19:46:00.331218 12987 sgd_solver.cpp:106] Iteration 500000, lr = 0.001 +I0114 19:54:53.002061 12987 solver.cpp:228] Iteration 501000, loss = 0.685772 +I0114 19:54:53.002303 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 19:54:53.002331 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 19:54:53.002346 12987 solver.cpp:244] Train net output #2: prob = 0.712465 (* 1 = 0.712465 loss) +I0114 19:54:53.256098 12987 sgd_solver.cpp:106] Iteration 501000, lr = 0.001 +I0114 19:58:32.126767 12997 blocking_queue.cpp:50] Waiting for data +I0114 20:03:48.140156 12987 solver.cpp:228] Iteration 502000, loss = 0.702632 +I0114 20:03:48.140383 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 20:03:48.140410 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 20:03:48.140439 12987 solver.cpp:244] Train net output #2: prob = 0.623786 (* 
1 = 0.623786 loss) +I0114 20:03:48.379653 12987 sgd_solver.cpp:106] Iteration 502000, lr = 0.001 +I0114 20:12:42.593577 12987 solver.cpp:228] Iteration 503000, loss = 0.672892 +I0114 20:12:42.593818 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 20:12:42.593848 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 20:12:42.593864 12987 solver.cpp:244] Train net output #2: prob = 0.742344 (* 1 = 0.742344 loss) +I0114 20:12:42.842525 12987 sgd_solver.cpp:106] Iteration 503000, lr = 0.001 +I0114 20:21:36.391476 12987 solver.cpp:228] Iteration 504000, loss = 0.66965 +I0114 20:21:36.391649 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 20:21:36.391660 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 20:21:36.391674 12987 solver.cpp:244] Train net output #2: prob = 0.657757 (* 1 = 0.657757 loss) +I0114 20:21:36.684550 12987 sgd_solver.cpp:106] Iteration 504000, lr = 0.001 +I0114 20:30:31.122210 12987 solver.cpp:228] Iteration 505000, loss = 0.675388 +I0114 20:30:31.122618 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 20:30:31.122704 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 20:30:31.122777 12987 solver.cpp:244] Train net output #2: prob = 0.975744 (* 1 = 0.975744 loss) +I0114 20:30:31.377362 12987 sgd_solver.cpp:106] Iteration 505000, lr = 0.001 +I0114 20:39:26.947592 12987 solver.cpp:228] Iteration 506000, loss = 0.655918 +I0114 20:39:26.947854 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 20:39:26.947902 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 20:39:26.947932 12987 solver.cpp:244] Train net output #2: prob = 0.416965 (* 1 = 0.416965 loss) +I0114 20:39:27.196629 12987 sgd_solver.cpp:106] Iteration 506000, lr = 0.001 +I0114 20:48:21.076547 12987 solver.cpp:228] Iteration 507000, loss = 0.637986 +I0114 20:48:21.076736 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 20:48:21.076748 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 20:48:21.076766 12987 solver.cpp:244] Train net output #2: prob = 0.473802 (* 1 = 0.473802 loss) +I0114 20:48:21.329202 12987 sgd_solver.cpp:106] Iteration 507000, lr = 0.001 +I0114 20:57:15.666265 12987 solver.cpp:228] Iteration 508000, loss = 0.629436 +I0114 20:57:15.666472 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 20:57:15.666484 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 20:57:15.666497 12987 solver.cpp:244] Train net output #2: prob = 0.582092 (* 1 = 0.582092 loss) +I0114 20:57:15.918361 12987 sgd_solver.cpp:106] Iteration 508000, lr = 0.001 +I0114 21:06:10.529165 12987 solver.cpp:228] Iteration 509000, loss = 0.631051 +I0114 21:06:10.529414 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 21:06:10.529448 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 21:06:10.529477 12987 solver.cpp:244] Train net output #2: prob = 0.503693 (* 1 = 0.503693 loss) +I0114 21:06:10.783815 12987 sgd_solver.cpp:106] Iteration 509000, lr = 0.001 +I0114 21:15:04.533547 12987 solver.cpp:228] Iteration 510000, loss = 0.608725 +I0114 21:15:04.533746 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 21:15:04.533758 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 21:15:04.533773 12987 solver.cpp:244] Train net output #2: prob = 0.651122 (* 1 = 0.651122 loss) +I0114 21:15:04.784610 12987 sgd_solver.cpp:106] Iteration 510000, lr = 
0.001 +I0114 21:23:58.096087 12987 solver.cpp:228] Iteration 511000, loss = 0.673899 +I0114 21:23:58.096330 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 21:23:58.096360 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 21:23:58.096390 12987 solver.cpp:244] Train net output #2: prob = 0.545684 (* 1 = 0.545684 loss) +I0114 21:23:58.348157 12987 sgd_solver.cpp:106] Iteration 511000, lr = 0.001 +I0114 21:32:52.018049 12987 solver.cpp:228] Iteration 512000, loss = 0.674454 +I0114 21:32:52.018277 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 21:32:52.018306 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 21:32:52.018333 12987 solver.cpp:244] Train net output #2: prob = 0.316198 (* 1 = 0.316198 loss) +I0114 21:32:52.252334 12987 sgd_solver.cpp:106] Iteration 512000, lr = 0.001 +I0114 21:41:46.420749 12987 solver.cpp:228] Iteration 513000, loss = 0.671268 +I0114 21:41:46.420930 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 21:41:46.420943 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 21:41:46.420963 12987 solver.cpp:244] Train net output #2: prob = 0.610347 (* 1 = 0.610347 loss) +I0114 21:41:46.673627 12987 sgd_solver.cpp:106] Iteration 513000, lr = 0.001 +I0114 21:50:41.614526 12987 solver.cpp:228] Iteration 514000, loss = 0.683823 +I0114 21:50:41.614751 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 21:50:41.614784 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 21:50:41.614801 12987 solver.cpp:244] Train net output #2: prob = 0.689837 (* 1 = 0.689837 loss) +I0114 21:50:41.870590 12987 sgd_solver.cpp:106] Iteration 514000, lr = 0.001 +I0114 21:59:36.313958 12987 solver.cpp:228] Iteration 515000, loss = 0.663324 +I0114 21:59:36.314196 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 21:59:36.314227 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 21:59:36.314245 12987 solver.cpp:244] Train net output #2: prob = 0.645934 (* 1 = 0.645934 loss) +I0114 21:59:36.552753 12987 sgd_solver.cpp:106] Iteration 515000, lr = 0.001 +I0114 22:08:29.775908 12987 solver.cpp:228] Iteration 516000, loss = 0.649505 +I0114 22:08:29.776109 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 22:08:29.776137 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 22:08:29.776157 12987 solver.cpp:244] Train net output #2: prob = 0.449935 (* 1 = 0.449935 loss) +I0114 22:08:30.031780 12987 sgd_solver.cpp:106] Iteration 516000, lr = 0.001 +I0114 22:17:22.526810 12987 solver.cpp:228] Iteration 517000, loss = 0.635915 +I0114 22:17:22.527012 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 22:17:22.527024 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 22:17:22.527037 12987 solver.cpp:244] Train net output #2: prob = 0.772317 (* 1 = 0.772317 loss) +I0114 22:17:22.834576 12987 sgd_solver.cpp:106] Iteration 517000, lr = 0.001 +I0114 22:26:16.734144 12987 solver.cpp:228] Iteration 518000, loss = 0.635256 +I0114 22:26:16.734385 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0114 22:26:16.734416 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 22:26:16.734436 12987 solver.cpp:244] Train net output #2: prob = 0.347706 (* 1 = 0.347706 loss) +I0114 22:26:16.984529 12987 sgd_solver.cpp:106] Iteration 518000, lr = 0.001 +I0114 22:35:11.113828 12987 solver.cpp:228] Iteration 519000, loss = 0.627499 +I0114 
22:35:11.114032 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0114 22:35:11.114045 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 22:35:11.114064 12987 solver.cpp:244] Train net output #2: prob = 1.11904 (* 1 = 1.11904 loss) +I0114 22:35:11.404395 12987 sgd_solver.cpp:106] Iteration 519000, lr = 0.001 +I0114 22:44:04.610489 12987 solver.cpp:228] Iteration 520000, loss = 0.594619 +I0114 22:44:04.610736 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0114 22:44:04.610767 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 22:44:04.610785 12987 solver.cpp:244] Train net output #2: prob = 0.716687 (* 1 = 0.716687 loss) +I0114 22:44:04.851243 12987 sgd_solver.cpp:106] Iteration 520000, lr = 0.001 +I0114 22:52:56.767587 12987 solver.cpp:228] Iteration 521000, loss = 0.661329 +I0114 22:52:56.767859 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0114 22:52:56.767907 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 22:52:56.767946 12987 solver.cpp:244] Train net output #2: prob = 0.562316 (* 1 = 0.562316 loss) +I0114 22:52:57.022810 12987 sgd_solver.cpp:106] Iteration 521000, lr = 0.001 +I0114 23:01:50.129642 12987 solver.cpp:228] Iteration 522000, loss = 0.661321 +I0114 23:01:50.130141 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0114 23:01:50.130172 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0114 23:01:50.130201 12987 solver.cpp:244] Train net output #2: prob = 0.671886 (* 1 = 0.671886 loss) +I0114 23:01:50.422047 12987 sgd_solver.cpp:106] Iteration 522000, lr = 0.001 +I0114 23:10:46.417078 12987 solver.cpp:228] Iteration 523000, loss = 0.643195 +I0114 23:10:46.417304 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 23:10:46.417335 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 23:10:46.417354 12987 solver.cpp:244] Train net output #2: prob = 0.517375 (* 1 = 0.517375 loss) +I0114 23:10:46.664170 12987 sgd_solver.cpp:106] Iteration 523000, lr = 0.001 +I0114 23:19:39.756193 12987 solver.cpp:228] Iteration 524000, loss = 0.657779 +I0114 23:19:39.756420 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 23:19:39.756435 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 23:19:39.756453 12987 solver.cpp:244] Train net output #2: prob = 0.554806 (* 1 = 0.554806 loss) +I0114 23:19:40.005496 12987 sgd_solver.cpp:106] Iteration 524000, lr = 0.001 +I0114 23:28:33.253109 12987 solver.cpp:228] Iteration 525000, loss = 0.640065 +I0114 23:28:33.253298 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0114 23:28:33.253309 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0114 23:28:33.253324 12987 solver.cpp:244] Train net output #2: prob = 0.502566 (* 1 = 0.502566 loss) +I0114 23:28:33.495398 12987 sgd_solver.cpp:106] Iteration 525000, lr = 0.001 +I0114 23:37:27.647428 12987 solver.cpp:228] Iteration 526000, loss = 0.645062 +I0114 23:37:27.647675 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0114 23:37:27.647703 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 23:37:27.647733 12987 solver.cpp:244] Train net output #2: prob = 0.64022 (* 1 = 0.64022 loss) +I0114 23:37:27.899998 12987 sgd_solver.cpp:106] Iteration 526000, lr = 0.001 +I0114 23:46:22.724900 12987 solver.cpp:228] Iteration 527000, loss = 0.628936 +I0114 23:46:22.725092 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0114 23:46:22.725105 
12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0114 23:46:22.725117 12987 solver.cpp:244] Train net output #2: prob = 0.342474 (* 1 = 0.342474 loss) +I0114 23:46:22.964841 12987 sgd_solver.cpp:106] Iteration 527000, lr = 0.001 +I0114 23:55:16.422288 12987 solver.cpp:228] Iteration 528000, loss = 0.625686 +I0114 23:55:16.422484 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.625 +I0114 23:55:16.422497 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0114 23:55:16.422510 12987 solver.cpp:244] Train net output #2: prob = 1.35597 (* 1 = 1.35597 loss) +I0114 23:55:16.673692 12987 sgd_solver.cpp:106] Iteration 528000, lr = 0.001 +I0115 00:04:10.469456 12987 solver.cpp:228] Iteration 529000, loss = 0.606459 +I0115 00:04:10.469645 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 00:04:10.469657 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 00:04:10.469669 12987 solver.cpp:244] Train net output #2: prob = 0.385432 (* 1 = 0.385432 loss) +I0115 00:04:10.724144 12987 sgd_solver.cpp:106] Iteration 529000, lr = 0.001 +I0115 00:13:03.950366 12987 solver.cpp:228] Iteration 530000, loss = 0.595832 +I0115 00:13:03.951525 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 00:13:03.951539 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0115 00:13:03.951560 12987 solver.cpp:244] Train net output #2: prob = 0.893112 (* 1 = 0.893112 loss) +I0115 00:13:04.204702 12987 sgd_solver.cpp:106] Iteration 530000, lr = 0.001 +I0115 00:21:58.371624 12987 solver.cpp:228] Iteration 531000, loss = 0.637483 +I0115 00:21:58.371830 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 00:21:58.371845 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 00:21:58.371865 12987 solver.cpp:244] Train net output #2: prob = 0.438936 (* 1 = 0.438936 loss) +I0115 00:21:58.665940 12987 sgd_solver.cpp:106] Iteration 531000, lr = 0.001 +I0115 00:30:52.101778 12987 solver.cpp:228] Iteration 532000, loss = 0.646033 +I0115 00:30:52.101987 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 00:30:52.102002 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 00:30:52.102022 12987 solver.cpp:244] Train net output #2: prob = 0.544925 (* 1 = 0.544925 loss) +I0115 00:30:52.353538 12987 sgd_solver.cpp:106] Iteration 532000, lr = 0.001 +I0115 00:39:45.599836 12987 solver.cpp:228] Iteration 533000, loss = 0.633488 +I0115 00:39:45.600013 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 00:39:45.600021 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 00:39:45.600031 12987 solver.cpp:244] Train net output #2: prob = 0.765894 (* 1 = 0.765894 loss) +I0115 00:39:45.851639 12987 sgd_solver.cpp:106] Iteration 533000, lr = 0.001 +I0115 00:48:39.787647 12987 solver.cpp:228] Iteration 534000, loss = 0.637127 +I0115 00:48:39.787839 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 00:48:39.787853 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 00:48:39.787866 12987 solver.cpp:244] Train net output #2: prob = 0.577317 (* 1 = 0.577317 loss) +I0115 00:48:40.041231 12987 sgd_solver.cpp:106] Iteration 534000, lr = 0.001 +I0115 00:57:34.402633 12987 solver.cpp:228] Iteration 535000, loss = 0.637666 +I0115 00:57:34.402840 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 00:57:34.402853 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 00:57:34.402870 12987 
solver.cpp:244] Train net output #2: prob = 0.67392 (* 1 = 0.67392 loss) +I0115 00:57:34.655104 12987 sgd_solver.cpp:106] Iteration 535000, lr = 0.001 +I0115 01:06:29.316146 12987 solver.cpp:228] Iteration 536000, loss = 0.621793 +I0115 01:06:29.316345 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 01:06:29.316357 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 01:06:29.316372 12987 solver.cpp:244] Train net output #2: prob = 0.741875 (* 1 = 0.741875 loss) +I0115 01:06:29.569653 12987 sgd_solver.cpp:106] Iteration 536000, lr = 0.001 +I0115 01:15:22.694576 12987 solver.cpp:228] Iteration 537000, loss = 0.618505 +I0115 01:15:22.694794 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 01:15:22.694806 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 01:15:22.694820 12987 solver.cpp:244] Train net output #2: prob = 0.980444 (* 1 = 0.980444 loss) +I0115 01:15:22.948323 12987 sgd_solver.cpp:106] Iteration 537000, lr = 0.001 +I0115 01:24:17.069094 12987 solver.cpp:228] Iteration 538000, loss = 0.61187 +I0115 01:24:17.069283 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 01:24:17.069294 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 01:24:17.069306 12987 solver.cpp:244] Train net output #2: prob = 0.728381 (* 1 = 0.728381 loss) +I0115 01:24:17.341565 12987 sgd_solver.cpp:106] Iteration 538000, lr = 0.001 +I0115 01:33:11.983414 12987 solver.cpp:228] Iteration 539000, loss = 0.594174 +I0115 01:33:11.983631 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 01:33:11.983657 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 01:33:11.983680 12987 solver.cpp:244] Train net output #2: prob = 0.464529 (* 1 = 0.464529 loss) +I0115 01:33:12.240525 12987 sgd_solver.cpp:106] Iteration 539000, lr = 0.001 +I0115 01:42:06.296293 12987 solver.cpp:228] Iteration 540000, loss = 0.595848 +I0115 01:42:06.296566 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 01:42:06.296583 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 01:42:06.296600 12987 solver.cpp:244] Train net output #2: prob = 0.606028 (* 1 = 0.606028 loss) +I0115 01:42:06.549970 12987 sgd_solver.cpp:106] Iteration 540000, lr = 0.001 +I0115 01:51:00.541117 12987 solver.cpp:228] Iteration 541000, loss = 0.635799 +I0115 01:51:00.541324 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 01:51:00.541354 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 01:51:00.541373 12987 solver.cpp:244] Train net output #2: prob = 0.84322 (* 1 = 0.84322 loss) +I0115 01:51:00.794340 12987 sgd_solver.cpp:106] Iteration 541000, lr = 0.001 +I0115 01:59:53.951864 12987 solver.cpp:228] Iteration 542000, loss = 0.655961 +I0115 01:59:53.952045 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 01:59:53.952057 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 01:59:53.952069 12987 solver.cpp:244] Train net output #2: prob = 0.705618 (* 1 = 0.705618 loss) +I0115 01:59:54.210785 12987 sgd_solver.cpp:106] Iteration 542000, lr = 0.001 +I0115 02:08:48.372629 12987 solver.cpp:228] Iteration 543000, loss = 0.625387 +I0115 02:08:48.372798 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0115 02:08:48.372810 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 02:08:48.372828 12987 solver.cpp:244] Train net output #2: prob = 0.846294 (* 1 = 0.846294 loss) +I0115 02:08:48.652101 
12987 sgd_solver.cpp:106] Iteration 543000, lr = 0.001 +I0115 02:17:42.517701 12987 solver.cpp:228] Iteration 544000, loss = 0.627649 +I0115 02:17:42.517899 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.65625 +I0115 02:17:42.517913 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 02:17:42.517930 12987 solver.cpp:244] Train net output #2: prob = 1.04071 (* 1 = 1.04071 loss) +I0115 02:17:42.769724 12987 sgd_solver.cpp:106] Iteration 544000, lr = 0.001 +I0115 02:26:38.261200 12987 solver.cpp:228] Iteration 545000, loss = 0.634147 +I0115 02:26:38.261409 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 02:26:38.261422 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 02:26:38.261435 12987 solver.cpp:244] Train net output #2: prob = 0.450291 (* 1 = 0.450291 loss) +I0115 02:26:38.520571 12987 sgd_solver.cpp:106] Iteration 545000, lr = 0.001 +I0115 02:35:31.280534 12987 solver.cpp:228] Iteration 546000, loss = 0.618024 +I0115 02:35:31.280700 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0115 02:35:31.280712 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 02:35:31.280725 12987 solver.cpp:244] Train net output #2: prob = 0.212397 (* 1 = 0.212397 loss) +I0115 02:35:31.539454 12987 sgd_solver.cpp:106] Iteration 546000, lr = 0.001 +I0115 02:44:24.607051 12987 solver.cpp:228] Iteration 547000, loss = 0.605329 +I0115 02:44:24.607247 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 02:44:24.607259 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 02:44:24.607272 12987 solver.cpp:244] Train net output #2: prob = 0.405387 (* 1 = 0.405387 loss) +I0115 02:44:24.860167 12987 sgd_solver.cpp:106] Iteration 547000, lr = 0.001 +I0115 02:53:19.248225 12987 solver.cpp:228] Iteration 548000, loss = 0.591958 +I0115 02:53:19.248422 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 02:53:19.248435 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 02:53:19.248450 12987 solver.cpp:244] Train net output #2: prob = 0.61259 (* 1 = 0.61259 loss) +I0115 02:53:19.505960 12987 sgd_solver.cpp:106] Iteration 548000, lr = 0.001 +I0115 03:02:12.793915 12987 solver.cpp:228] Iteration 549000, loss = 0.599268 +I0115 03:02:12.794124 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 03:02:12.794136 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 03:02:12.794152 12987 solver.cpp:244] Train net output #2: prob = 0.552517 (* 1 = 0.552517 loss) +I0115 03:02:13.045511 12987 sgd_solver.cpp:106] Iteration 549000, lr = 0.001 +I0115 03:11:06.301487 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_550000.caffemodel +I0115 03:11:12.395777 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_550000.solverstate +I0115 03:11:12.921994 12987 solver.cpp:228] Iteration 550000, loss = 0.576431 +I0115 03:11:12.922034 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 03:11:12.922044 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 03:11:12.922056 12987 solver.cpp:244] Train net output #2: prob = 0.397041 (* 1 = 0.397041 loss) +I0115 03:11:13.180261 12987 sgd_solver.cpp:106] Iteration 550000, lr = 0.001 +I0115 03:20:06.306354 12987 solver.cpp:228] Iteration 551000, loss = 0.626702 +I0115 03:20:06.306612 12987 solver.cpp:244] 
Train net output #0: accuracy@1 = 0.90625 +I0115 03:20:06.306625 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 03:20:06.306641 12987 solver.cpp:244] Train net output #2: prob = 0.787295 (* 1 = 0.787295 loss) +I0115 03:20:06.570129 12987 sgd_solver.cpp:106] Iteration 551000, lr = 0.001 +I0115 03:29:00.488876 12987 solver.cpp:228] Iteration 552000, loss = 0.63358 +I0115 03:29:00.489089 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 03:29:00.489100 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 03:29:00.489116 12987 solver.cpp:244] Train net output #2: prob = 0.711719 (* 1 = 0.711719 loss) +I0115 03:29:00.743316 12987 sgd_solver.cpp:106] Iteration 552000, lr = 0.001 +I0115 03:37:54.442289 12987 solver.cpp:228] Iteration 553000, loss = 0.628663 +I0115 03:37:54.442525 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 03:37:54.442553 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 03:37:54.442580 12987 solver.cpp:244] Train net output #2: prob = 0.544783 (* 1 = 0.544783 loss) +I0115 03:37:54.696462 12987 sgd_solver.cpp:106] Iteration 553000, lr = 0.001 +I0115 03:46:48.009506 12987 solver.cpp:228] Iteration 554000, loss = 0.640378 +I0115 03:46:48.009754 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 03:46:48.009783 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0115 03:46:48.009810 12987 solver.cpp:244] Train net output #2: prob = 0.944718 (* 1 = 0.944718 loss) +I0115 03:46:48.263068 12987 sgd_solver.cpp:106] Iteration 554000, lr = 0.001 +I0115 03:55:41.263037 12987 solver.cpp:228] Iteration 555000, loss = 0.623096 +I0115 03:55:41.263279 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 03:55:41.263314 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 03:55:41.263334 12987 solver.cpp:244] Train net output #2: prob = 0.988557 (* 1 = 0.988557 loss) +I0115 03:55:41.521826 12987 sgd_solver.cpp:106] Iteration 555000, lr = 0.001 +I0115 04:04:36.107378 12987 solver.cpp:228] Iteration 556000, loss = 0.613679 +I0115 04:04:36.107591 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 04:04:36.107604 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 04:04:36.107619 12987 solver.cpp:244] Train net output #2: prob = 0.841042 (* 1 = 0.841042 loss) +I0115 04:04:36.360550 12987 sgd_solver.cpp:106] Iteration 556000, lr = 0.001 +I0115 04:13:29.397989 12987 solver.cpp:228] Iteration 557000, loss = 0.599885 +I0115 04:13:29.398319 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 04:13:29.398353 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 04:13:29.398392 12987 solver.cpp:244] Train net output #2: prob = 0.683036 (* 1 = 0.683036 loss) +I0115 04:13:29.635906 12987 sgd_solver.cpp:106] Iteration 557000, lr = 0.001 +I0115 04:22:24.452397 12987 solver.cpp:228] Iteration 558000, loss = 0.59362 +I0115 04:22:24.452605 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 04:22:24.452616 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 04:22:24.452632 12987 solver.cpp:244] Train net output #2: prob = 0.434248 (* 1 = 0.434248 loss) +I0115 04:22:24.745534 12987 sgd_solver.cpp:106] Iteration 558000, lr = 0.001 +I0115 04:31:19.020087 12987 solver.cpp:228] Iteration 559000, loss = 0.593981 +I0115 04:31:19.020262 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 04:31:19.020272 12987 solver.cpp:244] Train net 
output #1: accuracy@5 = 0.9375 +I0115 04:31:19.020282 12987 solver.cpp:244] Train net output #2: prob = 0.862849 (* 1 = 0.862849 loss) +I0115 04:31:19.296316 12987 sgd_solver.cpp:106] Iteration 559000, lr = 0.001 +I0115 04:40:12.815305 12987 solver.cpp:228] Iteration 560000, loss = 0.560598 +I0115 04:40:12.815523 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 04:40:12.815534 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 04:40:12.815549 12987 solver.cpp:244] Train net output #2: prob = 0.263832 (* 1 = 0.263832 loss) +I0115 04:40:13.071719 12987 sgd_solver.cpp:106] Iteration 560000, lr = 0.001 +I0115 04:49:06.203357 12987 solver.cpp:228] Iteration 561000, loss = 0.61853 +I0115 04:49:06.203567 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 04:49:06.203578 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 04:49:06.203593 12987 solver.cpp:244] Train net output #2: prob = 0.527032 (* 1 = 0.527032 loss) +I0115 04:49:06.454496 12987 sgd_solver.cpp:106] Iteration 561000, lr = 0.001 +I0115 04:57:59.036803 12987 solver.cpp:228] Iteration 562000, loss = 0.619859 +I0115 04:57:59.036985 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 04:57:59.036998 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 04:57:59.037010 12987 solver.cpp:244] Train net output #2: prob = 0.677099 (* 1 = 0.677099 loss) +I0115 04:57:59.278051 12987 sgd_solver.cpp:106] Iteration 562000, lr = 0.001 +I0115 05:06:52.012377 12987 solver.cpp:228] Iteration 563000, loss = 0.604031 +I0115 05:06:52.012563 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 05:06:52.012573 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 05:06:52.012583 12987 solver.cpp:244] Train net output #2: prob = 0.906741 (* 1 = 0.906741 loss) +I0115 05:06:52.270561 12987 sgd_solver.cpp:106] Iteration 563000, lr = 0.001 +I0115 05:15:45.020032 12987 solver.cpp:228] Iteration 564000, loss = 0.617869 +I0115 05:15:45.020481 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 05:15:45.020567 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 05:15:45.020642 12987 solver.cpp:244] Train net output #2: prob = 0.40399 (* 1 = 0.40399 loss) +I0115 05:15:45.301502 12987 sgd_solver.cpp:106] Iteration 564000, lr = 0.001 +I0115 05:24:37.586289 12987 solver.cpp:228] Iteration 565000, loss = 0.607594 +I0115 05:24:37.586474 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 05:24:37.586488 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 05:24:37.586503 12987 solver.cpp:244] Train net output #2: prob = 0.573729 (* 1 = 0.573729 loss) +I0115 05:24:37.839349 12987 sgd_solver.cpp:106] Iteration 565000, lr = 0.001 +I0115 05:33:31.883738 12987 solver.cpp:228] Iteration 566000, loss = 0.61291 +I0115 05:33:31.883935 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 05:33:31.883949 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 05:33:31.883963 12987 solver.cpp:244] Train net output #2: prob = 0.789807 (* 1 = 0.789807 loss) +I0115 05:33:32.138223 12987 sgd_solver.cpp:106] Iteration 566000, lr = 0.001 +I0115 05:42:25.991786 12987 solver.cpp:228] Iteration 567000, loss = 0.596171 +I0115 05:42:25.991988 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 05:42:25.992002 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 05:42:25.992015 12987 solver.cpp:244] Train net output #2: 
prob = 0.556646 (* 1 = 0.556646 loss) +I0115 05:42:26.242938 12987 sgd_solver.cpp:106] Iteration 567000, lr = 0.001 +I0115 05:51:19.339040 12987 solver.cpp:228] Iteration 568000, loss = 0.582027 +I0115 05:51:19.339262 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 05:51:19.339277 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 05:51:19.339295 12987 solver.cpp:244] Train net output #2: prob = 0.262405 (* 1 = 0.262405 loss) +I0115 05:51:19.591356 12987 sgd_solver.cpp:106] Iteration 568000, lr = 0.001 +I0115 06:00:13.541844 12987 solver.cpp:228] Iteration 569000, loss = 0.57248 +I0115 06:00:13.542042 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 06:00:13.542054 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 06:00:13.542070 12987 solver.cpp:244] Train net output #2: prob = 0.34339 (* 1 = 0.34339 loss) +I0115 06:00:13.794057 12987 sgd_solver.cpp:106] Iteration 569000, lr = 0.001 +I0115 06:09:07.391160 12987 solver.cpp:228] Iteration 570000, loss = 0.571567 +I0115 06:09:07.391366 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 06:09:07.391378 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 06:09:07.391396 12987 solver.cpp:244] Train net output #2: prob = 0.90576 (* 1 = 0.90576 loss) +I0115 06:09:07.663236 12987 sgd_solver.cpp:106] Iteration 570000, lr = 0.001 +I0115 06:18:01.191520 12987 solver.cpp:228] Iteration 571000, loss = 0.602657 +I0115 06:18:01.191712 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 06:18:01.191725 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 06:18:01.191740 12987 solver.cpp:244] Train net output #2: prob = 0.73147 (* 1 = 0.73147 loss) +I0115 06:18:01.469243 12987 sgd_solver.cpp:106] Iteration 571000, lr = 0.001 +I0115 06:26:54.586043 12987 solver.cpp:228] Iteration 572000, loss = 0.614799 +I0115 06:26:54.586251 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 06:26:54.586262 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 06:26:54.586277 12987 solver.cpp:244] Train net output #2: prob = 0.381695 (* 1 = 0.381695 loss) +I0115 06:26:54.872800 12987 sgd_solver.cpp:106] Iteration 572000, lr = 0.001 +I0115 06:35:47.782337 12987 solver.cpp:228] Iteration 573000, loss = 0.597841 +I0115 06:35:47.782559 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 06:35:47.782589 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 06:35:47.782618 12987 solver.cpp:244] Train net output #2: prob = 0.554308 (* 1 = 0.554308 loss) +I0115 06:35:48.033545 12987 sgd_solver.cpp:106] Iteration 573000, lr = 0.001 +I0115 06:44:41.233393 12987 solver.cpp:228] Iteration 574000, loss = 0.60254 +I0115 06:44:41.233760 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 06:44:41.233829 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 06:44:41.233867 12987 solver.cpp:244] Train net output #2: prob = 0.658025 (* 1 = 0.658025 loss) +I0115 06:44:41.487432 12987 sgd_solver.cpp:106] Iteration 574000, lr = 0.001 +I0115 06:53:35.191992 12987 solver.cpp:228] Iteration 575000, loss = 0.604105 +I0115 06:53:35.192188 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.6875 +I0115 06:53:35.192200 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 06:53:35.192216 12987 solver.cpp:244] Train net output #2: prob = 0.719364 (* 1 = 0.719364 loss) +I0115 06:53:35.442970 12987 sgd_solver.cpp:106] Iteration 
575000, lr = 0.001 +I0115 07:02:28.053282 12987 solver.cpp:228] Iteration 576000, loss = 0.598975 +I0115 07:02:28.053478 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 07:02:28.053490 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 07:02:28.053505 12987 solver.cpp:244] Train net output #2: prob = 0.515268 (* 1 = 0.515268 loss) +I0115 07:02:28.324988 12987 sgd_solver.cpp:106] Iteration 576000, lr = 0.001 +I0115 07:11:22.140409 12987 solver.cpp:228] Iteration 577000, loss = 0.584946 +I0115 07:11:22.140614 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 07:11:22.140627 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 07:11:22.140645 12987 solver.cpp:244] Train net output #2: prob = 0.556964 (* 1 = 0.556964 loss) +I0115 07:11:22.423475 12987 sgd_solver.cpp:106] Iteration 577000, lr = 0.001 +I0115 07:20:15.329861 12987 solver.cpp:228] Iteration 578000, loss = 0.583159 +I0115 07:20:15.330037 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 07:20:15.330049 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 07:20:15.330065 12987 solver.cpp:244] Train net output #2: prob = 0.947709 (* 1 = 0.947709 loss) +I0115 07:20:15.586036 12987 sgd_solver.cpp:106] Iteration 578000, lr = 0.001 +I0115 07:29:08.673458 12987 solver.cpp:228] Iteration 579000, loss = 0.56717 +I0115 07:29:08.673681 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 07:29:08.673718 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 07:29:08.673751 12987 solver.cpp:244] Train net output #2: prob = 0.605878 (* 1 = 0.605878 loss) +I0115 07:29:08.921898 12987 sgd_solver.cpp:106] Iteration 579000, lr = 0.001 +I0115 07:38:01.919344 12987 solver.cpp:228] Iteration 580000, loss = 0.571762 +I0115 07:38:01.919512 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 07:38:01.919520 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0115 07:38:01.919529 12987 solver.cpp:244] Train net output #2: prob = 1.03431 (* 1 = 1.03431 loss) +I0115 07:38:02.173140 12987 sgd_solver.cpp:106] Iteration 580000, lr = 0.001 +I0115 07:46:54.850402 12987 solver.cpp:228] Iteration 581000, loss = 0.597839 +I0115 07:46:54.850585 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 07:46:54.850594 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 07:46:54.850605 12987 solver.cpp:244] Train net output #2: prob = 0.482505 (* 1 = 0.482505 loss) +I0115 07:46:55.105386 12987 sgd_solver.cpp:106] Iteration 581000, lr = 0.001 +I0115 07:55:46.841110 12987 solver.cpp:228] Iteration 582000, loss = 0.623363 +I0115 07:55:46.841302 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 07:55:46.841315 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 07:55:46.841328 12987 solver.cpp:244] Train net output #2: prob = 0.472892 (* 1 = 0.472892 loss) +I0115 07:55:47.094759 12987 sgd_solver.cpp:106] Iteration 582000, lr = 0.001 +I0115 08:04:39.856218 12987 solver.cpp:228] Iteration 583000, loss = 0.597866 +I0115 08:04:39.856436 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 08:04:39.856449 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 08:04:39.856470 12987 solver.cpp:244] Train net output #2: prob = 0.290299 (* 1 = 0.290299 loss) +I0115 08:04:40.100502 12987 sgd_solver.cpp:106] Iteration 583000, lr = 0.001 +I0115 08:13:33.490595 12987 solver.cpp:228] Iteration 584000, loss = 0.596947 
+I0115 08:13:33.490788 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 08:13:33.490800 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 08:13:33.490818 12987 solver.cpp:244] Train net output #2: prob = 0.730538 (* 1 = 0.730538 loss) +I0115 08:13:33.785742 12987 sgd_solver.cpp:106] Iteration 584000, lr = 0.001 +I0115 08:22:26.949491 12987 solver.cpp:228] Iteration 585000, loss = 0.605617 +I0115 08:22:26.949704 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 08:22:26.949717 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 08:22:26.949729 12987 solver.cpp:244] Train net output #2: prob = 0.758112 (* 1 = 0.758112 loss) +I0115 08:22:27.199566 12987 sgd_solver.cpp:106] Iteration 585000, lr = 0.001 +I0115 08:31:21.063035 12987 solver.cpp:228] Iteration 586000, loss = 0.588411 +I0115 08:31:21.065470 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 08:31:21.065539 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 08:31:21.065610 12987 solver.cpp:244] Train net output #2: prob = 0.823551 (* 1 = 0.823551 loss) +I0115 08:31:21.314767 12987 sgd_solver.cpp:106] Iteration 586000, lr = 0.001 +I0115 08:40:14.116818 12987 solver.cpp:228] Iteration 587000, loss = 0.5861 +I0115 08:40:14.117010 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 08:40:14.117022 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 08:40:14.117033 12987 solver.cpp:244] Train net output #2: prob = 0.391285 (* 1 = 0.391285 loss) +I0115 08:40:14.360863 12987 sgd_solver.cpp:106] Iteration 587000, lr = 0.001 +I0115 08:48:58.427788 12987 solver.cpp:228] Iteration 588000, loss = 0.566841 +I0115 08:48:58.428046 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 08:48:58.428077 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 08:48:58.428089 12987 solver.cpp:244] Train net output #2: prob = 0.59884 (* 1 = 0.59884 loss) +I0115 08:48:58.678939 12987 sgd_solver.cpp:106] Iteration 588000, lr = 0.001 +I0115 08:57:44.459504 12987 solver.cpp:228] Iteration 589000, loss = 0.57647 +I0115 08:57:44.459820 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 08:57:44.459861 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 08:57:44.459897 12987 solver.cpp:244] Train net output #2: prob = 0.19525 (* 1 = 0.19525 loss) +I0115 08:57:44.711845 12987 sgd_solver.cpp:106] Iteration 589000, lr = 0.001 +I0115 09:06:30.823199 12987 solver.cpp:228] Iteration 590000, loss = 0.560806 +I0115 09:06:30.823401 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 09:06:30.823410 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 09:06:30.823423 12987 solver.cpp:244] Train net output #2: prob = 0.525271 (* 1 = 0.525271 loss) +I0115 09:06:31.076470 12987 sgd_solver.cpp:106] Iteration 590000, lr = 0.001 +I0115 09:15:17.053411 12987 solver.cpp:228] Iteration 591000, loss = 0.595787 +I0115 09:15:17.053740 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 09:15:17.053786 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 09:15:17.053818 12987 solver.cpp:244] Train net output #2: prob = 0.362841 (* 1 = 0.362841 loss) +I0115 09:15:17.295377 12987 sgd_solver.cpp:106] Iteration 591000, lr = 0.001 +I0115 09:24:03.293186 12987 solver.cpp:228] Iteration 592000, loss = 0.610298 +I0115 09:24:03.293443 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 09:24:03.293473 
12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 09:24:03.293490 12987 solver.cpp:244] Train net output #2: prob = 0.780388 (* 1 = 0.780388 loss) +I0115 09:24:03.544358 12987 sgd_solver.cpp:106] Iteration 592000, lr = 0.001 +I0115 09:32:49.455039 12987 solver.cpp:228] Iteration 593000, loss = 0.595825 +I0115 09:32:49.455299 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 09:32:49.455310 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 09:32:49.455323 12987 solver.cpp:244] Train net output #2: prob = 0.657879 (* 1 = 0.657879 loss) +I0115 09:32:49.707329 12987 sgd_solver.cpp:106] Iteration 593000, lr = 0.001 +I0115 09:41:36.456976 12987 solver.cpp:228] Iteration 594000, loss = 0.618759 +I0115 09:41:36.457212 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 09:41:36.457243 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 09:41:36.457270 12987 solver.cpp:244] Train net output #2: prob = 0.641508 (* 1 = 0.641508 loss) +I0115 09:41:36.710115 12987 sgd_solver.cpp:106] Iteration 594000, lr = 0.001 +I0115 09:50:22.428078 12987 solver.cpp:228] Iteration 595000, loss = 0.596953 +I0115 09:50:22.428372 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 09:50:22.428405 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.875 +I0115 09:50:22.428474 12987 solver.cpp:244] Train net output #2: prob = 1.05799 (* 1 = 1.05799 loss) +I0115 09:50:22.683678 12987 sgd_solver.cpp:106] Iteration 595000, lr = 0.001 +I0115 09:59:08.436136 12987 solver.cpp:228] Iteration 596000, loss = 0.589051 +I0115 09:59:08.436835 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 09:59:08.436851 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 09:59:08.436864 12987 solver.cpp:244] Train net output #2: prob = 0.536494 (* 1 = 0.536494 loss) +I0115 09:59:08.689191 12987 sgd_solver.cpp:106] Iteration 596000, lr = 0.001 +I0115 10:07:54.384691 12987 solver.cpp:228] Iteration 597000, loss = 0.576962 +I0115 10:07:54.385009 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 10:07:54.385041 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 10:07:54.385067 12987 solver.cpp:244] Train net output #2: prob = 0.325271 (* 1 = 0.325271 loss) +I0115 10:07:54.636898 12987 sgd_solver.cpp:106] Iteration 597000, lr = 0.001 +I0115 10:16:40.496223 12987 solver.cpp:228] Iteration 598000, loss = 0.573768 +I0115 10:16:40.496439 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 10:16:40.496453 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 10:16:40.496465 12987 solver.cpp:244] Train net output #2: prob = 0.562022 (* 1 = 0.562022 loss) +I0115 10:16:40.749519 12987 sgd_solver.cpp:106] Iteration 598000, lr = 0.001 +I0115 10:25:26.089150 12987 solver.cpp:228] Iteration 599000, loss = 0.575631 +I0115 10:25:26.089457 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 10:25:26.089507 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 10:25:26.089537 12987 solver.cpp:244] Train net output #2: prob = 0.745875 (* 1 = 0.745875 loss) +I0115 10:25:26.332500 12987 sgd_solver.cpp:106] Iteration 599000, lr = 0.001 +I0115 10:34:11.674139 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_600000.caffemodel +I0115 10:34:13.442016 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file 
/home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_600000.solverstate +I0115 10:34:13.856178 12987 solver.cpp:228] Iteration 600000, loss = 0.548823 +I0115 10:34:13.856236 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 10:34:13.856250 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 10:34:13.856349 12987 solver.cpp:244] Train net output #2: prob = 0.461306 (* 1 = 0.461306 loss) +I0115 10:34:14.122973 12987 sgd_solver.cpp:106] Iteration 600000, lr = 0.0001 +I0115 10:42:59.850090 12987 solver.cpp:228] Iteration 601000, loss = 0.589572 +I0115 10:42:59.850378 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 10:42:59.850412 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 10:42:59.850436 12987 solver.cpp:244] Train net output #2: prob = 0.683375 (* 1 = 0.683375 loss) +I0115 10:43:00.106789 12987 sgd_solver.cpp:106] Iteration 601000, lr = 0.0001 +I0115 10:51:45.468992 12987 solver.cpp:228] Iteration 602000, loss = 0.576765 +I0115 10:51:45.469192 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 10:51:45.469202 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 10:51:45.469215 12987 solver.cpp:244] Train net output #2: prob = 1.15785 (* 1 = 1.15785 loss) +I0115 10:51:45.721509 12987 sgd_solver.cpp:106] Iteration 602000, lr = 0.0001 +I0115 11:00:31.356057 12987 solver.cpp:228] Iteration 603000, loss = 0.553665 +I0115 11:00:31.356290 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 11:00:31.356300 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 11:00:31.356314 12987 solver.cpp:244] Train net output #2: prob = 1.14098 (* 1 = 1.14098 loss) +I0115 11:00:31.609473 12987 sgd_solver.cpp:106] Iteration 603000, lr = 0.0001 +I0115 11:09:17.559314 12987 solver.cpp:228] Iteration 604000, loss = 0.561185 +I0115 11:09:17.559559 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 11:09:17.559586 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 11:09:17.559602 12987 solver.cpp:244] Train net output #2: prob = 0.587136 (* 1 = 0.587136 loss) +I0115 11:09:17.812618 12987 sgd_solver.cpp:106] Iteration 604000, lr = 0.0001 +I0115 11:18:03.481889 12987 solver.cpp:228] Iteration 605000, loss = 0.54309 +I0115 11:18:03.482213 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 11:18:03.482264 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 11:18:03.482303 12987 solver.cpp:244] Train net output #2: prob = 0.537296 (* 1 = 0.537296 loss) +I0115 11:18:03.734732 12987 sgd_solver.cpp:106] Iteration 605000, lr = 0.0001 +I0115 11:26:49.585582 12987 solver.cpp:228] Iteration 606000, loss = 0.536927 +I0115 11:26:49.585878 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 11:26:49.585917 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 11:26:49.585937 12987 solver.cpp:244] Train net output #2: prob = 0.449344 (* 1 = 0.449344 loss) +I0115 11:26:49.837548 12987 sgd_solver.cpp:106] Iteration 606000, lr = 0.0001 +I0115 11:35:35.568650 12987 solver.cpp:228] Iteration 607000, loss = 0.511831 +I0115 11:35:35.568852 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 11:35:35.568862 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 11:35:35.568874 12987 solver.cpp:244] Train net output #2: prob = 0.397076 (* 1 = 0.397076 loss) +I0115 11:35:35.821801 12987 sgd_solver.cpp:106] Iteration 607000, lr = 
0.0001 +I0115 11:44:21.549676 12987 solver.cpp:228] Iteration 608000, loss = 0.491045 +I0115 11:44:21.549926 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 11:44:21.549952 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 11:44:21.549975 12987 solver.cpp:244] Train net output #2: prob = 0.511477 (* 1 = 0.511477 loss) +I0115 11:44:21.801808 12987 sgd_solver.cpp:106] Iteration 608000, lr = 0.0001 +I0115 11:53:07.824211 12987 solver.cpp:228] Iteration 609000, loss = 0.474467 +I0115 11:53:07.824429 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 11:53:07.824440 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 11:53:07.824452 12987 solver.cpp:244] Train net output #2: prob = 0.368383 (* 1 = 0.368383 loss) +I0115 11:53:08.074779 12987 sgd_solver.cpp:106] Iteration 609000, lr = 0.0001 +I0115 12:01:54.285970 12987 solver.cpp:228] Iteration 610000, loss = 0.456817 +I0115 12:01:54.286175 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 12:01:54.286186 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 12:01:54.286200 12987 solver.cpp:244] Train net output #2: prob = 0.516197 (* 1 = 0.516197 loss) +I0115 12:01:54.536507 12987 sgd_solver.cpp:106] Iteration 610000, lr = 0.0001 +I0115 12:10:40.192833 12987 solver.cpp:228] Iteration 611000, loss = 0.541707 +I0115 12:10:40.193034 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 12:10:40.193044 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 12:10:40.193058 12987 solver.cpp:244] Train net output #2: prob = 0.432904 (* 1 = 0.432904 loss) +I0115 12:10:40.446204 12987 sgd_solver.cpp:106] Iteration 611000, lr = 0.0001 +I0115 12:19:26.760488 12987 solver.cpp:228] Iteration 612000, loss = 0.551817 +I0115 12:19:26.760727 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 12:19:26.760740 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 12:19:26.760754 12987 solver.cpp:244] Train net output #2: prob = 0.782281 (* 1 = 0.782281 loss) +I0115 12:19:27.015444 12987 sgd_solver.cpp:106] Iteration 612000, lr = 0.0001 +I0115 12:28:13.603904 12987 solver.cpp:228] Iteration 613000, loss = 0.533237 +I0115 12:28:13.604159 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 12:28:13.604192 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 12:28:13.604205 12987 solver.cpp:244] Train net output #2: prob = 0.614332 (* 1 = 0.614332 loss) +I0115 12:28:13.884671 12987 sgd_solver.cpp:106] Iteration 613000, lr = 0.0001 +I0115 12:37:00.059378 12987 solver.cpp:228] Iteration 614000, loss = 0.53506 +I0115 12:37:00.059589 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 12:37:00.059600 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 12:37:00.059612 12987 solver.cpp:244] Train net output #2: prob = 0.27533 (* 1 = 0.27533 loss) +I0115 12:37:00.308502 12987 sgd_solver.cpp:106] Iteration 614000, lr = 0.0001 +I0115 12:45:46.120331 12987 solver.cpp:228] Iteration 615000, loss = 0.530013 +I0115 12:45:46.120538 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 12:45:46.120548 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 12:45:46.120563 12987 solver.cpp:244] Train net output #2: prob = 0.570711 (* 1 = 0.570711 loss) +I0115 12:45:46.374506 12987 sgd_solver.cpp:106] Iteration 615000, lr = 0.0001 +I0115 12:54:32.612540 12987 solver.cpp:228] Iteration 616000, loss = 0.514762 +I0115 
12:54:32.612736 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 12:54:32.612746 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 12:54:32.612757 12987 solver.cpp:244] Train net output #2: prob = 0.299124 (* 1 = 0.299124 loss) +I0115 12:54:32.865454 12987 sgd_solver.cpp:106] Iteration 616000, lr = 0.0001 +I0115 13:03:18.495533 12987 solver.cpp:228] Iteration 617000, loss = 0.510684 +I0115 13:03:18.495806 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 13:03:18.495836 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 13:03:18.495862 12987 solver.cpp:244] Train net output #2: prob = 0.383208 (* 1 = 0.383208 loss) +I0115 13:03:18.750604 12987 sgd_solver.cpp:106] Iteration 617000, lr = 0.0001 +I0115 13:12:04.532943 12987 solver.cpp:228] Iteration 618000, loss = 0.489049 +I0115 13:12:04.533234 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 13:12:04.533263 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 13:12:04.533282 12987 solver.cpp:244] Train net output #2: prob = 1.31985 (* 1 = 1.31985 loss) +I0115 13:12:04.787750 12987 sgd_solver.cpp:106] Iteration 618000, lr = 0.0001 +I0115 13:20:51.007050 12987 solver.cpp:228] Iteration 619000, loss = 0.472406 +I0115 13:20:51.007285 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 13:20:51.007310 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 13:20:51.007321 12987 solver.cpp:244] Train net output #2: prob = 0.78205 (* 1 = 0.78205 loss) +I0115 13:20:51.260061 12987 sgd_solver.cpp:106] Iteration 619000, lr = 0.0001 +I0115 13:29:37.629465 12987 solver.cpp:228] Iteration 620000, loss = 0.465695 +I0115 13:29:37.629690 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 13:29:37.629700 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 13:29:37.629714 12987 solver.cpp:244] Train net output #2: prob = 0.503809 (* 1 = 0.503809 loss) +I0115 13:29:37.883460 12987 sgd_solver.cpp:106] Iteration 620000, lr = 0.0001 +I0115 13:38:23.921417 12987 solver.cpp:228] Iteration 621000, loss = 0.544949 +I0115 13:38:23.921633 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 13:38:23.921643 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 13:38:23.921656 12987 solver.cpp:244] Train net output #2: prob = 0.468958 (* 1 = 0.468958 loss) +I0115 13:38:24.178995 12987 sgd_solver.cpp:106] Iteration 621000, lr = 0.0001 +I0115 13:47:10.085840 12987 solver.cpp:228] Iteration 622000, loss = 0.551162 +I0115 13:47:10.086151 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 13:47:10.086171 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 13:47:10.086192 12987 solver.cpp:244] Train net output #2: prob = 0.457917 (* 1 = 0.457917 loss) +I0115 13:47:10.340061 12987 sgd_solver.cpp:106] Iteration 622000, lr = 0.0001 +I0115 13:55:56.482764 12987 solver.cpp:228] Iteration 623000, loss = 0.533293 +I0115 13:55:56.482991 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 13:55:56.483019 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 13:55:56.483047 12987 solver.cpp:244] Train net output #2: prob = 0.616471 (* 1 = 0.616471 loss) +I0115 13:55:56.738677 12987 sgd_solver.cpp:106] Iteration 623000, lr = 0.0001 +I0115 14:04:52.015491 12987 solver.cpp:228] Iteration 624000, loss = 0.52817 +I0115 14:04:52.015661 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 
14:04:52.015671 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 14:04:52.015684 12987 solver.cpp:244] Train net output #2: prob = 0.690485 (* 1 = 0.690485 loss) +I0115 14:04:52.269068 12987 sgd_solver.cpp:106] Iteration 624000, lr = 0.0001 +I0115 14:13:43.390642 12987 solver.cpp:228] Iteration 625000, loss = 0.524674 +I0115 14:13:43.390890 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 14:13:43.390899 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 14:13:43.390913 12987 solver.cpp:244] Train net output #2: prob = 0.630527 (* 1 = 0.630527 loss) +I0115 14:13:43.640198 12987 sgd_solver.cpp:106] Iteration 625000, lr = 0.0001 +I0115 14:22:29.925473 12987 solver.cpp:228] Iteration 626000, loss = 0.512731 +I0115 14:22:29.931599 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 14:22:29.931634 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0115 14:22:29.931664 12987 solver.cpp:244] Train net output #2: prob = 0.646177 (* 1 = 0.646177 loss) +I0115 14:22:30.179355 12987 sgd_solver.cpp:106] Iteration 626000, lr = 0.0001 +I0115 14:31:25.615149 12987 solver.cpp:228] Iteration 627000, loss = 0.492435 +I0115 14:31:25.615453 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 14:31:25.615504 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 14:31:25.615545 12987 solver.cpp:244] Train net output #2: prob = 0.666145 (* 1 = 0.666145 loss) +I0115 14:31:25.874408 12987 sgd_solver.cpp:106] Iteration 627000, lr = 0.0001 +I0115 14:40:18.991466 12987 solver.cpp:228] Iteration 628000, loss = 0.476942 +I0115 14:40:18.991746 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 14:40:18.991763 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 14:40:18.991780 12987 solver.cpp:244] Train net output #2: prob = 0.387154 (* 1 = 0.387154 loss) +I0115 14:40:19.250485 12987 sgd_solver.cpp:106] Iteration 628000, lr = 0.0001 +I0115 14:49:05.636173 12987 solver.cpp:228] Iteration 629000, loss = 0.476729 +I0115 14:49:05.636389 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 14:49:05.636399 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 14:49:05.636411 12987 solver.cpp:244] Train net output #2: prob = 0.627398 (* 1 = 0.627398 loss) +I0115 14:49:05.890450 12987 sgd_solver.cpp:106] Iteration 629000, lr = 0.0001 +I0115 14:57:52.480188 12987 solver.cpp:228] Iteration 630000, loss = 0.45266 +I0115 14:57:52.480408 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 14:57:52.480432 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 14:57:52.480448 12987 solver.cpp:244] Train net output #2: prob = 0.171415 (* 1 = 0.171415 loss) +I0115 14:57:52.731149 12987 sgd_solver.cpp:106] Iteration 630000, lr = 0.0001 +I0115 15:06:39.291931 12987 solver.cpp:228] Iteration 631000, loss = 0.530391 +I0115 15:06:39.292202 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 15:06:39.292235 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 15:06:39.292259 12987 solver.cpp:244] Train net output #2: prob = 0.282885 (* 1 = 0.282885 loss) +I0115 15:06:39.546676 12987 sgd_solver.cpp:106] Iteration 631000, lr = 0.0001 +I0115 15:15:26.008888 12987 solver.cpp:228] Iteration 632000, loss = 0.542377 +I0115 15:15:26.009124 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 15:15:26.009136 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 
15:15:26.009148 12987 solver.cpp:244] Train net output #2: prob = 0.358223 (* 1 = 0.358223 loss) +I0115 15:15:26.262856 12987 sgd_solver.cpp:106] Iteration 632000, lr = 0.0001 +I0115 15:24:13.042271 12987 solver.cpp:228] Iteration 633000, loss = 0.527244 +I0115 15:24:13.042534 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 15:24:13.042573 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 15:24:13.042605 12987 solver.cpp:244] Train net output #2: prob = 0.432349 (* 1 = 0.432349 loss) +I0115 15:24:13.297118 12987 sgd_solver.cpp:106] Iteration 633000, lr = 0.0001 +I0115 15:32:59.775897 12987 solver.cpp:228] Iteration 634000, loss = 0.536688 +I0115 15:32:59.776141 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 15:32:59.776152 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 15:32:59.776165 12987 solver.cpp:244] Train net output #2: prob = 0.601416 (* 1 = 0.601416 loss) +I0115 15:33:00.028928 12987 sgd_solver.cpp:106] Iteration 634000, lr = 0.0001 +I0115 15:41:46.643172 12987 solver.cpp:228] Iteration 635000, loss = 0.518886 +I0115 15:41:46.643402 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 15:41:46.643432 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 15:41:46.643445 12987 solver.cpp:244] Train net output #2: prob = 0.436556 (* 1 = 0.436556 loss) +I0115 15:41:46.895719 12987 sgd_solver.cpp:106] Iteration 635000, lr = 0.0001 +I0115 15:50:33.693564 12987 solver.cpp:228] Iteration 636000, loss = 0.508077 +I0115 15:50:33.693840 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 15:50:33.693863 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 15:50:33.693884 12987 solver.cpp:244] Train net output #2: prob = 0.722413 (* 1 = 0.722413 loss) +I0115 15:50:33.947325 12987 sgd_solver.cpp:106] Iteration 636000, lr = 0.0001 +I0115 15:59:20.333809 12987 solver.cpp:228] Iteration 637000, loss = 0.487229 +I0115 15:59:20.334076 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 15:59:20.334112 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 15:59:20.334127 12987 solver.cpp:244] Train net output #2: prob = 0.806959 (* 1 = 0.806959 loss) +I0115 15:59:20.589126 12987 sgd_solver.cpp:106] Iteration 637000, lr = 0.0001 +I0115 16:08:07.089774 12987 solver.cpp:228] Iteration 638000, loss = 0.485004 +I0115 16:08:07.090134 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 16:08:07.090169 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 16:08:07.090194 12987 solver.cpp:244] Train net output #2: prob = 0.303374 (* 1 = 0.303374 loss) +I0115 16:08:07.343739 12987 sgd_solver.cpp:106] Iteration 638000, lr = 0.0001 +I0115 16:16:53.631289 12987 solver.cpp:228] Iteration 639000, loss = 0.479372 +I0115 16:16:53.631515 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 16:16:53.631525 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 16:16:53.631537 12987 solver.cpp:244] Train net output #2: prob = 0.32033 (* 1 = 0.32033 loss) +I0115 16:16:53.885411 12987 sgd_solver.cpp:106] Iteration 639000, lr = 0.0001 +I0115 16:25:40.558778 12987 solver.cpp:228] Iteration 640000, loss = 0.449735 +I0115 16:25:40.558993 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 16:25:40.559005 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 16:25:40.559017 12987 solver.cpp:244] Train net output #2: prob = 0.342823 (* 1 = 0.342823 loss) 
+I0115 16:25:40.808740 12987 sgd_solver.cpp:106] Iteration 640000, lr = 0.0001 +I0115 16:34:27.318967 12987 solver.cpp:228] Iteration 641000, loss = 0.5285 +I0115 16:34:27.319196 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 16:34:27.319219 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 16:34:27.319242 12987 solver.cpp:244] Train net output #2: prob = 0.513382 (* 1 = 0.513382 loss) +I0115 16:34:27.574172 12987 sgd_solver.cpp:106] Iteration 641000, lr = 0.0001 +I0115 16:43:14.082998 12987 solver.cpp:228] Iteration 642000, loss = 0.527035 +I0115 16:43:14.083262 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 16:43:14.083290 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 16:43:14.083307 12987 solver.cpp:244] Train net output #2: prob = 0.22605 (* 1 = 0.22605 loss) +I0115 16:43:14.340189 12987 sgd_solver.cpp:106] Iteration 642000, lr = 0.0001 +I0115 16:52:00.547230 12987 solver.cpp:228] Iteration 643000, loss = 0.512256 +I0115 16:52:00.547471 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 16:52:00.547482 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 16:52:00.547493 12987 solver.cpp:244] Train net output #2: prob = 0.543535 (* 1 = 0.543535 loss) +I0115 16:52:00.800778 12987 sgd_solver.cpp:106] Iteration 643000, lr = 0.0001 +I0115 17:00:46.772992 12987 solver.cpp:228] Iteration 644000, loss = 0.522225 +I0115 17:00:46.773242 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 17:00:46.773265 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 17:00:46.773290 12987 solver.cpp:244] Train net output #2: prob = 0.504685 (* 1 = 0.504685 loss) +I0115 17:00:47.027082 12987 sgd_solver.cpp:106] Iteration 644000, lr = 0.0001 +I0115 17:09:33.059635 12987 solver.cpp:228] Iteration 645000, loss = 0.510295 +I0115 17:09:33.059873 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 17:09:33.059887 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 17:09:33.059900 12987 solver.cpp:244] Train net output #2: prob = 0.179517 (* 1 = 0.179517 loss) +I0115 17:09:33.315240 12987 sgd_solver.cpp:106] Iteration 645000, lr = 0.0001 +I0115 17:18:19.285977 12987 solver.cpp:228] Iteration 646000, loss = 0.511037 +I0115 17:18:19.286219 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 17:18:19.286244 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 17:18:19.286260 12987 solver.cpp:244] Train net output #2: prob = 0.517352 (* 1 = 0.517352 loss) +I0115 17:18:19.533741 12987 sgd_solver.cpp:106] Iteration 646000, lr = 0.0001 +I0115 17:27:05.823264 12987 solver.cpp:228] Iteration 647000, loss = 0.48498 +I0115 17:27:05.823462 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 17:27:05.823472 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 17:27:05.823483 12987 solver.cpp:244] Train net output #2: prob = 0.303387 (* 1 = 0.303387 loss) +I0115 17:27:06.076777 12987 sgd_solver.cpp:106] Iteration 647000, lr = 0.0001 +I0115 17:35:52.166021 12987 solver.cpp:228] Iteration 648000, loss = 0.465866 +I0115 17:35:52.166314 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 17:35:52.166354 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 17:35:52.166393 12987 solver.cpp:244] Train net output #2: prob = 0.371894 (* 1 = 0.371894 loss) +I0115 17:35:52.418241 12987 sgd_solver.cpp:106] Iteration 648000, lr = 0.0001 +I0115 
17:44:38.563071 12987 solver.cpp:228] Iteration 649000, loss = 0.461408 +I0115 17:44:38.563318 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 17:44:38.563344 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 17:44:38.563364 12987 solver.cpp:244] Train net output #2: prob = 0.383381 (* 1 = 0.383381 loss) +I0115 17:44:38.814419 12987 sgd_solver.cpp:106] Iteration 649000, lr = 0.0001 +I0115 17:53:24.873860 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_650000.caffemodel +I0115 17:53:27.282294 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_650000.solverstate +I0115 17:53:27.702759 12987 solver.cpp:228] Iteration 650000, loss = 0.454908 +I0115 17:53:27.702790 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 17:53:27.702796 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 17:53:27.702806 12987 solver.cpp:244] Train net output #2: prob = 0.314356 (* 1 = 0.314356 loss) +I0115 17:53:27.944345 12987 sgd_solver.cpp:106] Iteration 650000, lr = 0.0001 +I0115 18:02:13.809250 12987 solver.cpp:228] Iteration 651000, loss = 0.508568 +I0115 18:02:13.809504 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 18:02:13.809545 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 18:02:13.809571 12987 solver.cpp:244] Train net output #2: prob = 0.836061 (* 1 = 0.836061 loss) +I0115 18:02:14.060248 12987 sgd_solver.cpp:106] Iteration 651000, lr = 0.0001 +I0115 18:11:00.520571 12987 solver.cpp:228] Iteration 652000, loss = 0.526445 +I0115 18:11:00.521198 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 18:11:00.521232 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 18:11:00.521250 12987 solver.cpp:244] Train net output #2: prob = 0.555304 (* 1 = 0.555304 loss) +I0115 18:11:00.775128 12987 sgd_solver.cpp:106] Iteration 652000, lr = 0.0001 +I0115 18:19:47.076970 12987 solver.cpp:228] Iteration 653000, loss = 0.507832 +I0115 18:19:47.077188 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 18:19:47.077198 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 18:19:47.077209 12987 solver.cpp:244] Train net output #2: prob = 0.520736 (* 1 = 0.520736 loss) +I0115 18:19:47.328479 12987 sgd_solver.cpp:106] Iteration 653000, lr = 0.0001 +I0115 18:28:33.499778 12987 solver.cpp:228] Iteration 654000, loss = 0.505624 +I0115 18:28:33.500073 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 18:28:33.500110 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 18:28:33.500139 12987 solver.cpp:244] Train net output #2: prob = 0.2861 (* 1 = 0.2861 loss) +I0115 18:28:33.753379 12987 sgd_solver.cpp:106] Iteration 654000, lr = 0.0001 +I0115 18:37:19.664461 12987 solver.cpp:228] Iteration 655000, loss = 0.512828 +I0115 18:37:19.664644 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 18:37:19.664654 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 18:37:19.664665 12987 solver.cpp:244] Train net output #2: prob = 0.525041 (* 1 = 0.525041 loss) +I0115 18:37:19.915608 12987 sgd_solver.cpp:106] Iteration 655000, lr = 0.0001 +I0115 18:46:06.222569 12987 solver.cpp:228] Iteration 656000, loss = 0.498421 +I0115 18:46:06.222759 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 
+I0115 18:46:06.222769 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 18:46:06.222780 12987 solver.cpp:244] Train net output #2: prob = 0.327988 (* 1 = 0.327988 loss) +I0115 18:46:06.475081 12987 sgd_solver.cpp:106] Iteration 656000, lr = 0.0001 +I0115 18:54:52.736572 12987 solver.cpp:228] Iteration 657000, loss = 0.48559 +I0115 18:54:52.736814 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 18:54:52.736831 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 18:54:52.736850 12987 solver.cpp:244] Train net output #2: prob = 0.408329 (* 1 = 0.408329 loss) +I0115 18:54:52.992156 12987 sgd_solver.cpp:106] Iteration 657000, lr = 0.0001 +I0115 19:03:39.270944 12987 solver.cpp:228] Iteration 658000, loss = 0.468985 +I0115 19:03:39.271162 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 19:03:39.271173 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 19:03:39.271185 12987 solver.cpp:244] Train net output #2: prob = 0.273238 (* 1 = 0.273238 loss) +I0115 19:03:39.525924 12987 sgd_solver.cpp:106] Iteration 658000, lr = 0.0001 +I0115 19:12:24.849053 12987 solver.cpp:228] Iteration 659000, loss = 0.463163 +I0115 19:12:24.849334 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 19:12:24.849364 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 19:12:24.849380 12987 solver.cpp:244] Train net output #2: prob = 0.628935 (* 1 = 0.628935 loss) +I0115 19:12:25.098592 12987 sgd_solver.cpp:106] Iteration 659000, lr = 0.0001 +I0115 19:21:11.503178 12987 solver.cpp:228] Iteration 660000, loss = 0.454787 +I0115 19:21:11.503496 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 19:21:11.503535 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 19:21:11.503563 12987 solver.cpp:244] Train net output #2: prob = 0.785122 (* 1 = 0.785122 loss) +I0115 19:21:11.757455 12987 sgd_solver.cpp:106] Iteration 660000, lr = 0.0001 +I0115 19:29:58.640055 12987 solver.cpp:228] Iteration 661000, loss = 0.51774 +I0115 19:29:58.640333 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 19:29:58.640370 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 19:29:58.640399 12987 solver.cpp:244] Train net output #2: prob = 0.46222 (* 1 = 0.46222 loss) +I0115 19:29:58.892312 12987 sgd_solver.cpp:106] Iteration 661000, lr = 0.0001 +I0115 19:38:45.328402 12987 solver.cpp:228] Iteration 662000, loss = 0.535728 +I0115 19:38:45.328608 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.71875 +I0115 19:38:45.328619 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 19:38:45.328631 12987 solver.cpp:244] Train net output #2: prob = 0.928381 (* 1 = 0.928381 loss) +I0115 19:38:45.582396 12987 sgd_solver.cpp:106] Iteration 662000, lr = 0.0001 +I0115 19:47:31.991813 12987 solver.cpp:228] Iteration 663000, loss = 0.512661 +I0115 19:47:31.992112 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 19:47:31.992147 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 19:47:31.992178 12987 solver.cpp:244] Train net output #2: prob = 0.524965 (* 1 = 0.524965 loss) +I0115 19:47:32.245151 12987 sgd_solver.cpp:106] Iteration 663000, lr = 0.0001 +I0115 19:56:17.950932 12987 solver.cpp:228] Iteration 664000, loss = 0.503716 +I0115 19:56:17.951174 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 19:56:17.951186 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 
19:56:17.951200 12987 solver.cpp:244] Train net output #2: prob = 0.43217 (* 1 = 0.43217 loss) +I0115 19:56:18.204728 12987 sgd_solver.cpp:106] Iteration 664000, lr = 0.0001 +I0115 20:05:03.994537 12987 solver.cpp:228] Iteration 665000, loss = 0.505112 +I0115 20:05:03.994874 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 20:05:03.994916 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 20:05:03.994946 12987 solver.cpp:244] Train net output #2: prob = 0.469244 (* 1 = 0.469244 loss) +I0115 20:05:04.249258 12987 sgd_solver.cpp:106] Iteration 665000, lr = 0.0001 +I0115 20:13:49.927913 12987 solver.cpp:228] Iteration 666000, loss = 0.494504 +I0115 20:13:49.928136 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 20:13:49.928148 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 20:13:49.928159 12987 solver.cpp:244] Train net output #2: prob = 0.543495 (* 1 = 0.543495 loss) +I0115 20:13:50.182852 12987 sgd_solver.cpp:106] Iteration 666000, lr = 0.0001 +I0115 20:22:35.909976 12987 solver.cpp:228] Iteration 667000, loss = 0.48252 +I0115 20:22:35.910221 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 20:22:35.910264 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 20:22:35.910291 12987 solver.cpp:244] Train net output #2: prob = 0.647126 (* 1 = 0.647126 loss) +I0115 20:22:36.154582 12987 sgd_solver.cpp:106] Iteration 667000, lr = 0.0001 +I0115 20:31:22.319594 12987 solver.cpp:228] Iteration 668000, loss = 0.47207 +I0115 20:31:22.320801 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 20:31:22.320813 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 20:31:22.320827 12987 solver.cpp:244] Train net output #2: prob = 0.400604 (* 1 = 0.400604 loss) +I0115 20:31:22.572829 12987 sgd_solver.cpp:106] Iteration 668000, lr = 0.0001 +I0115 20:40:08.326436 12987 solver.cpp:228] Iteration 669000, loss = 0.471059 +I0115 20:40:08.326706 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 20:40:08.326737 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 20:40:08.326772 12987 solver.cpp:244] Train net output #2: prob = 0.456186 (* 1 = 0.456186 loss) +I0115 20:40:08.580735 12987 sgd_solver.cpp:106] Iteration 669000, lr = 0.0001 +I0115 20:48:54.902079 12987 solver.cpp:228] Iteration 670000, loss = 0.444424 +I0115 20:48:54.902374 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0115 20:48:54.902403 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 20:48:54.902432 12987 solver.cpp:244] Train net output #2: prob = 0.148869 (* 1 = 0.148869 loss) +I0115 20:48:55.157539 12987 sgd_solver.cpp:106] Iteration 670000, lr = 0.0001 +I0115 20:57:49.861773 12987 solver.cpp:228] Iteration 671000, loss = 0.512123 +I0115 20:57:49.862030 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0115 20:57:49.862042 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 20:57:49.862058 12987 solver.cpp:244] Train net output #2: prob = 0.284135 (* 1 = 0.284135 loss) +I0115 20:57:50.116258 12987 sgd_solver.cpp:106] Iteration 671000, lr = 0.0001 +I0115 21:06:44.649520 12987 solver.cpp:228] Iteration 672000, loss = 0.518001 +I0115 21:06:44.649812 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 21:06:44.649840 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 21:06:44.649878 12987 solver.cpp:244] Train net output #2: prob = 0.841244 (* 1 = 0.841244 loss) 
+I0115 21:06:44.904620 12987 sgd_solver.cpp:106] Iteration 672000, lr = 0.0001 +I0115 21:15:32.169178 12987 solver.cpp:228] Iteration 673000, loss = 0.515821 +I0115 21:15:32.169385 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 21:15:32.169400 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 21:15:32.169415 12987 solver.cpp:244] Train net output #2: prob = 0.383011 (* 1 = 0.383011 loss) +I0115 21:15:32.420027 12987 sgd_solver.cpp:106] Iteration 673000, lr = 0.0001 +I0115 21:24:34.631214 12987 solver.cpp:228] Iteration 674000, loss = 0.524544 +I0115 21:24:34.631552 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.75 +I0115 21:24:34.631589 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 21:24:34.631629 12987 solver.cpp:244] Train net output #2: prob = 0.686578 (* 1 = 0.686578 loss) +I0115 21:24:34.907104 12987 sgd_solver.cpp:106] Iteration 674000, lr = 0.0001 +I0115 21:33:49.662633 12987 solver.cpp:228] Iteration 675000, loss = 0.509826 +I0115 21:33:49.662950 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0115 21:33:49.662981 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 21:33:49.663013 12987 solver.cpp:244] Train net output #2: prob = 0.525751 (* 1 = 0.525751 loss) +I0115 21:33:49.934384 12987 sgd_solver.cpp:106] Iteration 675000, lr = 0.0001 +I0115 21:43:05.940395 12987 solver.cpp:228] Iteration 676000, loss = 0.498268 +I0115 21:43:05.940749 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0115 21:43:05.940783 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 21:43:05.940822 12987 solver.cpp:244] Train net output #2: prob = 0.491074 (* 1 = 0.491074 loss) +I0115 21:43:06.188710 12987 sgd_solver.cpp:106] Iteration 676000, lr = 0.0001 +I0115 21:52:21.210440 12987 solver.cpp:228] Iteration 677000, loss = 0.478172 +I0115 21:52:21.210717 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 21:52:21.210731 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 21:52:21.210749 12987 solver.cpp:244] Train net output #2: prob = 0.318865 (* 1 = 0.318865 loss) +I0115 21:52:21.474418 12987 sgd_solver.cpp:106] Iteration 677000, lr = 0.0001 +I0115 22:01:38.026895 12987 solver.cpp:228] Iteration 678000, loss = 0.475829 +I0115 22:01:38.027230 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 22:01:38.027256 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 22:01:38.027292 12987 solver.cpp:244] Train net output #2: prob = 0.351434 (* 1 = 0.351434 loss) +I0115 22:01:38.305326 12987 sgd_solver.cpp:106] Iteration 678000, lr = 0.0001 +I0115 22:10:58.227596 12987 solver.cpp:228] Iteration 679000, loss = 0.471979 +I0115 22:10:58.227895 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0115 22:10:58.227926 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 22:10:58.227958 12987 solver.cpp:244] Train net output #2: prob = 0.312059 (* 1 = 0.312059 loss) +I0115 22:10:58.485299 12987 sgd_solver.cpp:106] Iteration 679000, lr = 0.0001 +I0115 22:20:17.400990 12987 solver.cpp:228] Iteration 680000, loss = 0.443703 +I0115 22:20:17.401283 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 22:20:17.401295 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 22:20:17.401312 12987 solver.cpp:244] Train net output #2: prob = 0.367896 (* 1 = 0.367896 loss) +I0115 22:20:17.685325 12987 sgd_solver.cpp:106] Iteration 680000, lr = 0.0001 +I0115 
22:29:34.465490 12987 solver.cpp:228] Iteration 681000, loss = 0.511634 +I0115 22:29:34.465757 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 22:29:34.465770 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 22:29:34.465791 12987 solver.cpp:244] Train net output #2: prob = 0.459666 (* 1 = 0.459666 loss) +I0115 22:29:34.735368 12987 sgd_solver.cpp:106] Iteration 681000, lr = 0.0001 +I0115 22:38:54.656054 12987 solver.cpp:228] Iteration 682000, loss = 0.512468 +I0115 22:38:54.656335 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 22:38:54.656352 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 22:38:54.656369 12987 solver.cpp:244] Train net output #2: prob = 0.330239 (* 1 = 0.330239 loss) +I0115 22:38:54.891885 12987 sgd_solver.cpp:106] Iteration 682000, lr = 0.0001 +I0115 22:48:16.091197 12987 solver.cpp:228] Iteration 683000, loss = 0.496357 +I0115 22:48:16.091516 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 22:48:16.091545 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 22:48:16.091575 12987 solver.cpp:244] Train net output #2: prob = 0.269 (* 1 = 0.269 loss) +I0115 22:48:16.365308 12987 sgd_solver.cpp:106] Iteration 683000, lr = 0.0001 +I0115 22:57:40.404765 12987 solver.cpp:228] Iteration 684000, loss = 0.512047 +I0115 22:57:40.405105 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 22:57:40.405134 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 22:57:40.405166 12987 solver.cpp:244] Train net output #2: prob = 0.424234 (* 1 = 0.424234 loss) +I0115 22:57:40.655316 12987 sgd_solver.cpp:106] Iteration 684000, lr = 0.0001 +I0115 23:07:03.550559 12987 solver.cpp:228] Iteration 685000, loss = 0.49994 +I0115 23:07:03.550855 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 23:07:03.550868 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 23:07:03.550886 12987 solver.cpp:244] Train net output #2: prob = 0.41906 (* 1 = 0.41906 loss) +I0115 23:07:03.825321 12987 sgd_solver.cpp:106] Iteration 685000, lr = 0.0001 +I0115 23:16:22.298388 12987 solver.cpp:228] Iteration 686000, loss = 0.502942 +I0115 23:16:22.298744 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0115 23:16:22.298779 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 23:16:22.298797 12987 solver.cpp:244] Train net output #2: prob = 0.322869 (* 1 = 0.322869 loss) +I0115 23:16:22.569164 12987 sgd_solver.cpp:106] Iteration 686000, lr = 0.0001 +I0115 23:25:39.455889 12987 solver.cpp:228] Iteration 687000, loss = 0.482249 +I0115 23:25:39.456524 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 23:25:39.456544 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0115 23:25:39.456563 12987 solver.cpp:244] Train net output #2: prob = 0.721974 (* 1 = 0.721974 loss) +I0115 23:25:39.714102 12987 sgd_solver.cpp:106] Iteration 687000, lr = 0.0001 +I0115 23:34:59.677759 12987 solver.cpp:228] Iteration 688000, loss = 0.47067 +I0115 23:34:59.678071 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 23:34:59.678100 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 23:34:59.678133 12987 solver.cpp:244] Train net output #2: prob = 0.620106 (* 1 = 0.620106 loss) +I0115 23:34:59.987840 12987 sgd_solver.cpp:106] Iteration 688000, lr = 0.0001 +I0115 23:44:22.492918 12987 solver.cpp:228] Iteration 689000, loss = 0.456377 +I0115 23:44:22.493191 12987 
solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0115 23:44:22.493209 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0115 23:44:22.493227 12987 solver.cpp:244] Train net output #2: prob = 0.506524 (* 1 = 0.506524 loss) +I0115 23:44:22.755328 12987 sgd_solver.cpp:106] Iteration 689000, lr = 0.0001 +I0115 23:53:41.901437 12987 solver.cpp:228] Iteration 690000, loss = 0.449367 +I0115 23:53:41.910711 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0115 23:53:41.910743 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0115 23:53:41.910769 12987 solver.cpp:244] Train net output #2: prob = 0.554565 (* 1 = 0.554565 loss) +I0115 23:53:42.161547 12987 sgd_solver.cpp:106] Iteration 690000, lr = 0.0001 +I0116 00:02:58.977749 12987 solver.cpp:228] Iteration 691000, loss = 0.505308 +I0116 00:02:58.978039 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 00:02:58.978051 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 00:02:58.978068 12987 solver.cpp:244] Train net output #2: prob = 0.726565 (* 1 = 0.726565 loss) +I0116 00:02:59.235309 12987 sgd_solver.cpp:106] Iteration 691000, lr = 0.0001 +I0116 00:12:14.786319 12987 solver.cpp:228] Iteration 692000, loss = 0.513471 +I0116 00:12:14.795382 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 00:12:14.795439 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0116 00:12:14.795472 12987 solver.cpp:244] Train net output #2: prob = 0.889078 (* 1 = 0.889078 loss) +I0116 00:12:15.015310 12987 sgd_solver.cpp:106] Iteration 692000, lr = 0.0001 +I0116 00:21:36.808346 12987 solver.cpp:228] Iteration 693000, loss = 0.497735 +I0116 00:21:36.808840 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 00:21:36.808931 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 00:21:36.809026 12987 solver.cpp:244] Train net output #2: prob = 0.490123 (* 1 = 0.490123 loss) +I0116 00:21:37.082833 12987 sgd_solver.cpp:106] Iteration 693000, lr = 0.0001 +I0116 00:30:58.685200 12987 solver.cpp:228] Iteration 694000, loss = 0.498848 +I0116 00:30:58.685549 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 00:30:58.685585 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 00:30:58.685624 12987 solver.cpp:244] Train net output #2: prob = 0.325685 (* 1 = 0.325685 loss) +I0116 00:30:58.944444 12987 sgd_solver.cpp:106] Iteration 694000, lr = 0.0001 +I0116 00:40:18.784590 12987 solver.cpp:228] Iteration 695000, loss = 0.501424 +I0116 00:40:18.784832 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0116 00:40:18.784847 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 00:40:18.784862 12987 solver.cpp:244] Train net output #2: prob = 0.549762 (* 1 = 0.549762 loss) +I0116 00:40:19.015321 12987 sgd_solver.cpp:106] Iteration 695000, lr = 0.0001 +I0116 00:49:35.202005 12987 solver.cpp:228] Iteration 696000, loss = 0.485288 +I0116 00:49:35.202234 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 00:49:35.202245 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 00:49:35.202260 12987 solver.cpp:244] Train net output #2: prob = 0.188456 (* 1 = 0.188456 loss) +I0116 00:49:35.447818 12987 sgd_solver.cpp:106] Iteration 696000, lr = 0.0001 +I0116 00:58:56.299181 12987 solver.cpp:228] Iteration 697000, loss = 0.483219 +I0116 00:58:56.305167 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 00:58:56.305209 12987 
solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 00:58:56.305253 12987 solver.cpp:244] Train net output #2: prob = 0.693747 (* 1 = 0.693747 loss) +I0116 00:58:56.564407 12987 sgd_solver.cpp:106] Iteration 697000, lr = 0.0001 +I0116 01:08:19.475231 12987 solver.cpp:228] Iteration 698000, loss = 0.467578 +I0116 01:08:19.477059 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 01:08:19.477090 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 01:08:19.477109 12987 solver.cpp:244] Train net output #2: prob = 0.42706 (* 1 = 0.42706 loss) +I0116 01:08:19.734292 12987 sgd_solver.cpp:106] Iteration 698000, lr = 0.0001 +I0116 01:17:36.737573 12987 solver.cpp:228] Iteration 699000, loss = 0.464078 +I0116 01:17:36.737821 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 01:17:36.737833 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 01:17:36.737848 12987 solver.cpp:244] Train net output #2: prob = 0.294365 (* 1 = 0.294365 loss) +I0116 01:17:36.989609 12987 sgd_solver.cpp:106] Iteration 699000, lr = 0.0001 +I0116 01:26:53.085302 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_700000.caffemodel +I0116 01:26:57.573696 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_700000.solverstate +I0116 01:26:58.095072 12987 solver.cpp:228] Iteration 700000, loss = 0.455263 +I0116 01:26:58.095206 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 01:26:58.095247 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 01:26:58.095283 12987 solver.cpp:244] Train net output #2: prob = 0.537101 (* 1 = 0.537101 loss) +I0116 01:26:58.375316 12987 sgd_solver.cpp:106] Iteration 700000, lr = 0.0001 +I0116 01:36:15.434980 12987 solver.cpp:228] Iteration 701000, loss = 0.502897 +I0116 01:36:15.435339 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0116 01:36:15.435369 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 01:36:15.435405 12987 solver.cpp:244] Train net output #2: prob = 0.231088 (* 1 = 0.231088 loss) +I0116 01:36:15.715351 12987 sgd_solver.cpp:106] Iteration 701000, lr = 0.0001 +I0116 01:45:32.199749 12987 solver.cpp:228] Iteration 702000, loss = 0.525306 +I0116 01:45:32.215329 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0116 01:45:32.215358 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 01:45:32.215374 12987 solver.cpp:244] Train net output #2: prob = 0.215306 (* 1 = 0.215306 loss) +I0116 01:45:32.442956 12987 sgd_solver.cpp:106] Iteration 702000, lr = 0.0001 +I0116 01:54:49.359992 12987 solver.cpp:228] Iteration 703000, loss = 0.505537 +I0116 01:54:49.360256 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 01:54:49.360270 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0116 01:54:49.360286 12987 solver.cpp:244] Train net output #2: prob = 0.794188 (* 1 = 0.794188 loss) +I0116 01:54:49.620704 12987 sgd_solver.cpp:106] Iteration 703000, lr = 0.0001 +I0116 02:04:04.587389 12987 solver.cpp:228] Iteration 704000, loss = 0.495637 +I0116 02:04:04.587642 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 02:04:04.587656 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 02:04:04.587673 12987 solver.cpp:244] Train net output #2: prob = 0.576157 (* 1 = 0.576157 loss) 
+I0116 02:04:04.830677 12987 sgd_solver.cpp:106] Iteration 704000, lr = 0.0001 +I0116 02:13:22.007719 12987 solver.cpp:228] Iteration 705000, loss = 0.505752 +I0116 02:13:22.008004 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 02:13:22.008018 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 02:13:22.008035 12987 solver.cpp:244] Train net output #2: prob = 0.604264 (* 1 = 0.604264 loss) +I0116 02:13:22.278331 12987 sgd_solver.cpp:106] Iteration 705000, lr = 0.0001 +I0116 02:22:39.156839 12987 solver.cpp:228] Iteration 706000, loss = 0.48714 +I0116 02:22:39.157192 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 02:22:39.157225 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 02:22:39.157263 12987 solver.cpp:244] Train net output #2: prob = 0.480146 (* 1 = 0.480146 loss) +I0116 02:22:39.433506 12987 sgd_solver.cpp:106] Iteration 706000, lr = 0.0001 +I0116 02:31:52.959949 12987 solver.cpp:228] Iteration 707000, loss = 0.475371 +I0116 02:31:52.960283 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0116 02:31:52.960310 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0116 02:31:52.960341 12987 solver.cpp:244] Train net output #2: prob = 0.749033 (* 1 = 0.749033 loss) +I0116 02:31:53.253306 12987 sgd_solver.cpp:106] Iteration 707000, lr = 0.0001 +I0116 02:41:05.931502 12987 solver.cpp:228] Iteration 708000, loss = 0.461746 +I0116 02:41:05.931839 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 02:41:05.931870 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 02:41:05.931911 12987 solver.cpp:244] Train net output #2: prob = 0.361659 (* 1 = 0.361659 loss) +I0116 02:41:06.174245 12987 sgd_solver.cpp:106] Iteration 708000, lr = 0.0001 +I0116 02:50:20.418098 12987 solver.cpp:228] Iteration 709000, loss = 0.463543 +I0116 02:50:20.425287 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 02:50:20.425319 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 02:50:20.425338 12987 solver.cpp:244] Train net output #2: prob = 0.47151 (* 1 = 0.47151 loss) +I0116 02:50:20.663542 12987 sgd_solver.cpp:106] Iteration 709000, lr = 0.0001 +I0116 02:59:32.699959 12987 solver.cpp:228] Iteration 710000, loss = 0.444679 +I0116 02:59:32.700286 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 02:59:32.700325 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 02:59:32.700341 12987 solver.cpp:244] Train net output #2: prob = 0.366596 (* 1 = 0.366596 loss) +I0116 02:59:32.975330 12987 sgd_solver.cpp:106] Iteration 710000, lr = 0.0001 +I0116 03:08:46.189340 12987 solver.cpp:228] Iteration 711000, loss = 0.499896 +I0116 03:08:46.189630 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 03:08:46.189645 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 03:08:46.189666 12987 solver.cpp:244] Train net output #2: prob = 0.531176 (* 1 = 0.531176 loss) +I0116 03:08:46.444406 12987 sgd_solver.cpp:106] Iteration 711000, lr = 0.0001 +I0116 03:18:00.465786 12987 solver.cpp:228] Iteration 712000, loss = 0.508135 +I0116 03:18:00.469043 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 03:18:00.469069 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 03:18:00.469087 12987 solver.cpp:244] Train net output #2: prob = 0.494069 (* 1 = 0.494069 loss) +I0116 03:18:00.725318 12987 sgd_solver.cpp:106] Iteration 712000, lr = 0.0001 +I0116 
03:27:14.130137 12987 solver.cpp:228] Iteration 713000, loss = 0.503102 +I0116 03:27:14.134662 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 03:27:14.134690 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 03:27:14.134709 12987 solver.cpp:244] Train net output #2: prob = 0.408177 (* 1 = 0.408177 loss) +I0116 03:27:14.453963 12987 sgd_solver.cpp:106] Iteration 713000, lr = 0.0001 +I0116 03:36:27.732501 12987 solver.cpp:228] Iteration 714000, loss = 0.512513 +I0116 03:36:27.732764 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 03:36:27.732777 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 03:36:27.732794 12987 solver.cpp:244] Train net output #2: prob = 0.483033 (* 1 = 0.483033 loss) +I0116 03:36:28.020848 12987 sgd_solver.cpp:106] Iteration 714000, lr = 0.0001 +I0116 03:45:41.634462 12987 solver.cpp:228] Iteration 715000, loss = 0.496531 +I0116 03:45:41.639344 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 03:45:41.639384 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 03:45:41.639403 12987 solver.cpp:244] Train net output #2: prob = 0.613316 (* 1 = 0.613316 loss) +I0116 03:45:41.911727 12987 sgd_solver.cpp:106] Iteration 715000, lr = 0.0001 +I0116 03:54:56.105669 12987 solver.cpp:228] Iteration 716000, loss = 0.49509 +I0116 03:54:56.105952 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 03:54:56.105964 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 03:54:56.105983 12987 solver.cpp:244] Train net output #2: prob = 0.583968 (* 1 = 0.583968 loss) +I0116 03:54:56.385327 12987 sgd_solver.cpp:106] Iteration 716000, lr = 0.0001 +I0116 04:04:08.573487 12987 solver.cpp:228] Iteration 717000, loss = 0.468843 +I0116 04:04:08.573812 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 04:04:08.573843 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 04:04:08.573878 12987 solver.cpp:244] Train net output #2: prob = 0.212246 (* 1 = 0.212246 loss) +I0116 04:04:08.865306 12987 sgd_solver.cpp:106] Iteration 717000, lr = 0.0001 +I0116 04:13:22.089962 12987 solver.cpp:228] Iteration 718000, loss = 0.467476 +I0116 04:13:22.090297 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 04:13:22.090337 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 04:13:22.090358 12987 solver.cpp:244] Train net output #2: prob = 0.528502 (* 1 = 0.528502 loss) +I0116 04:13:22.369695 12987 sgd_solver.cpp:106] Iteration 718000, lr = 0.0001 +I0116 04:22:37.296355 12987 solver.cpp:228] Iteration 719000, loss = 0.469621 +I0116 04:22:37.300081 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 04:22:37.300122 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.90625 +I0116 04:22:37.300145 12987 solver.cpp:244] Train net output #2: prob = 0.809282 (* 1 = 0.809282 loss) +I0116 04:22:37.564276 12987 sgd_solver.cpp:106] Iteration 719000, lr = 0.0001 +I0116 04:31:49.791187 12987 solver.cpp:228] Iteration 720000, loss = 0.437431 +I0116 04:31:49.791458 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 04:31:49.791470 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 04:31:49.791484 12987 solver.cpp:244] Train net output #2: prob = 0.409665 (* 1 = 0.409665 loss) +I0116 04:31:50.045037 12987 sgd_solver.cpp:106] Iteration 720000, lr = 0.0001 +I0116 04:41:02.000133 12987 solver.cpp:228] Iteration 721000, loss = 0.49874 +I0116 
04:41:02.000378 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 04:41:02.000387 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 04:41:02.000401 12987 solver.cpp:244] Train net output #2: prob = 0.356521 (* 1 = 0.356521 loss) +I0116 04:41:02.250401 12987 sgd_solver.cpp:106] Iteration 721000, lr = 0.0001 +I0116 04:50:20.129758 12987 solver.cpp:228] Iteration 722000, loss = 0.498097 +I0116 04:50:20.130054 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0116 04:50:20.130066 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 04:50:20.130084 12987 solver.cpp:244] Train net output #2: prob = 0.824573 (* 1 = 0.824573 loss) +I0116 04:50:20.365314 12987 sgd_solver.cpp:106] Iteration 722000, lr = 0.0001 +I0116 04:59:37.892457 12987 solver.cpp:228] Iteration 723000, loss = 0.490748 +I0116 04:59:37.895798 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 04:59:37.895834 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 04:59:37.895856 12987 solver.cpp:244] Train net output #2: prob = 0.657133 (* 1 = 0.657133 loss) +I0116 04:59:38.155336 12987 sgd_solver.cpp:106] Iteration 723000, lr = 0.0001 +I0116 05:08:51.969736 12987 solver.cpp:228] Iteration 724000, loss = 0.500121 +I0116 05:08:51.970597 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 05:08:51.970639 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 05:08:51.970660 12987 solver.cpp:244] Train net output #2: prob = 0.433665 (* 1 = 0.433665 loss) +I0116 05:08:52.244673 12987 sgd_solver.cpp:106] Iteration 724000, lr = 0.0001 +I0116 05:18:09.149332 12987 solver.cpp:228] Iteration 725000, loss = 0.484833 +I0116 05:18:09.149586 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 05:18:09.149600 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 05:18:09.149616 12987 solver.cpp:244] Train net output #2: prob = 0.309007 (* 1 = 0.309007 loss) +I0116 05:18:09.415338 12987 sgd_solver.cpp:106] Iteration 725000, lr = 0.0001 +I0116 05:27:32.187047 12987 solver.cpp:228] Iteration 726000, loss = 0.488701 +I0116 05:27:32.187361 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 05:27:32.187398 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 05:27:32.187443 12987 solver.cpp:244] Train net output #2: prob = 0.436806 (* 1 = 0.436806 loss) +I0116 05:27:32.445353 12987 sgd_solver.cpp:106] Iteration 726000, lr = 0.0001 +I0116 05:36:52.922385 12987 solver.cpp:228] Iteration 727000, loss = 0.473257 +I0116 05:36:52.945375 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 05:36:52.945430 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 05:36:52.945471 12987 solver.cpp:244] Train net output #2: prob = 0.379172 (* 1 = 0.379172 loss) +I0116 05:36:53.155320 12987 sgd_solver.cpp:106] Iteration 727000, lr = 0.0001 +I0116 05:46:12.468420 12987 solver.cpp:228] Iteration 728000, loss = 0.461976 +I0116 05:46:12.468720 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 05:46:12.468734 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 05:46:12.468750 12987 solver.cpp:244] Train net output #2: prob = 0.42207 (* 1 = 0.42207 loss) +I0116 05:46:12.725652 12987 sgd_solver.cpp:106] Iteration 728000, lr = 0.0001 +I0116 05:55:30.626134 12987 solver.cpp:228] Iteration 729000, loss = 0.449813 +I0116 05:55:30.626442 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 
05:55:30.626472 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 05:55:30.626503 12987 solver.cpp:244] Train net output #2: prob = 0.640318 (* 1 = 0.640318 loss) +I0116 05:55:30.895313 12987 sgd_solver.cpp:106] Iteration 729000, lr = 0.0001 +I0116 06:04:52.072124 12987 solver.cpp:228] Iteration 730000, loss = 0.444629 +I0116 06:04:52.072392 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 06:04:52.072402 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 06:04:52.072415 12987 solver.cpp:244] Train net output #2: prob = 0.641275 (* 1 = 0.641275 loss) +I0116 06:04:52.335331 12987 sgd_solver.cpp:106] Iteration 730000, lr = 0.0001 +I0116 06:14:16.528869 12987 solver.cpp:228] Iteration 731000, loss = 0.485495 +I0116 06:14:16.535336 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0116 06:14:16.535369 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 06:14:16.535389 12987 solver.cpp:244] Train net output #2: prob = 0.208846 (* 1 = 0.208846 loss) +I0116 06:14:16.762156 12987 sgd_solver.cpp:106] Iteration 731000, lr = 0.0001 +I0116 06:23:37.970787 12987 solver.cpp:228] Iteration 732000, loss = 0.497979 +I0116 06:23:37.973495 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 06:23:37.973538 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 06:23:37.973572 12987 solver.cpp:244] Train net output #2: prob = 0.198677 (* 1 = 0.198677 loss) +I0116 06:23:38.245301 12987 sgd_solver.cpp:106] Iteration 732000, lr = 0.0001 +I0116 06:32:58.616505 12987 solver.cpp:228] Iteration 733000, loss = 0.487427 +I0116 06:32:58.616822 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 06:32:58.616852 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 06:32:58.616888 12987 solver.cpp:244] Train net output #2: prob = 0.30952 (* 1 = 0.30952 loss) +I0116 06:32:58.885324 12987 sgd_solver.cpp:106] Iteration 733000, lr = 0.0001 +I0116 06:42:18.366852 12987 solver.cpp:228] Iteration 734000, loss = 0.488259 +I0116 06:42:18.367178 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0116 06:42:18.367209 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 06:42:18.367244 12987 solver.cpp:244] Train net output #2: prob = 0.29082 (* 1 = 0.29082 loss) +I0116 06:42:18.625305 12987 sgd_solver.cpp:106] Iteration 734000, lr = 0.0001 +I0116 06:51:40.339202 12987 solver.cpp:228] Iteration 735000, loss = 0.484262 +I0116 06:51:40.339473 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.96875 +I0116 06:51:40.339486 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 06:51:40.339503 12987 solver.cpp:244] Train net output #2: prob = 0.408711 (* 1 = 0.408711 loss) +I0116 06:51:40.585312 12987 sgd_solver.cpp:106] Iteration 735000, lr = 0.0001 +I0116 07:01:00.436148 12987 solver.cpp:228] Iteration 736000, loss = 0.478497 +I0116 07:01:00.436457 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 07:01:00.436498 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 07:01:00.436795 12987 solver.cpp:244] Train net output #2: prob = 0.278875 (* 1 = 0.278875 loss) +I0116 07:01:00.694785 12987 sgd_solver.cpp:106] Iteration 736000, lr = 0.0001 +I0116 07:10:15.670490 12987 solver.cpp:228] Iteration 737000, loss = 0.471144 +I0116 07:10:15.670747 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 07:10:15.670760 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 07:10:15.670779 12987 
solver.cpp:244] Train net output #2: prob = 0.451382 (* 1 = 0.451382 loss) +I0116 07:10:15.905329 12987 sgd_solver.cpp:106] Iteration 737000, lr = 0.0001 +I0116 07:19:31.802706 12987 solver.cpp:228] Iteration 738000, loss = 0.456973 +I0116 07:19:31.803077 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0116 07:19:31.803107 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 07:19:31.803139 12987 solver.cpp:244] Train net output #2: prob = 0.145225 (* 1 = 0.145225 loss) +I0116 07:19:32.035331 12987 sgd_solver.cpp:106] Iteration 738000, lr = 0.0001 +I0116 07:28:51.327764 12987 solver.cpp:228] Iteration 739000, loss = 0.451114 +I0116 07:28:51.328047 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 07:28:51.328059 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 07:28:51.328076 12987 solver.cpp:244] Train net output #2: prob = 0.442892 (* 1 = 0.442892 loss) +I0116 07:28:51.585331 12987 sgd_solver.cpp:106] Iteration 739000, lr = 0.0001 +I0116 07:38:07.767560 12987 solver.cpp:228] Iteration 740000, loss = 0.445443 +I0116 07:38:07.767885 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 07:38:07.767927 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 07:38:07.767951 12987 solver.cpp:244] Train net output #2: prob = 0.425568 (* 1 = 0.425568 loss) +I0116 07:38:08.024420 12987 sgd_solver.cpp:106] Iteration 740000, lr = 0.0001 +I0116 07:47:21.981330 12987 solver.cpp:228] Iteration 741000, loss = 0.488078 +I0116 07:47:21.981679 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.78125 +I0116 07:47:21.981710 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.9375 +I0116 07:47:21.981745 12987 solver.cpp:244] Train net output #2: prob = 0.916215 (* 1 = 0.916215 loss) +I0116 07:47:22.245331 12987 sgd_solver.cpp:106] Iteration 741000, lr = 0.0001 +I0116 07:56:35.138787 12987 solver.cpp:228] Iteration 742000, loss = 0.512646 +I0116 07:56:35.139055 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 07:56:35.139066 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 07:56:35.139081 12987 solver.cpp:244] Train net output #2: prob = 0.576157 (* 1 = 0.576157 loss) +I0116 07:56:35.405330 12987 sgd_solver.cpp:106] Iteration 742000, lr = 0.0001 +I0116 08:05:50.531869 12987 solver.cpp:228] Iteration 743000, loss = 0.491434 +I0116 08:05:50.532197 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 08:05:50.532228 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 08:05:50.532263 12987 solver.cpp:244] Train net output #2: prob = 0.226709 (* 1 = 0.226709 loss) +I0116 08:05:50.825325 12987 sgd_solver.cpp:106] Iteration 743000, lr = 0.0001 +I0116 08:15:08.658617 12987 solver.cpp:228] Iteration 744000, loss = 0.485654 +I0116 08:15:08.665383 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 08:15:08.665426 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 08:15:08.665446 12987 solver.cpp:244] Train net output #2: prob = 0.489081 (* 1 = 0.489081 loss) +I0116 08:15:08.935314 12987 sgd_solver.cpp:106] Iteration 744000, lr = 0.0001 +I0116 08:24:26.245290 12987 solver.cpp:228] Iteration 745000, loss = 0.490687 +I0116 08:24:26.255331 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 08:24:26.255369 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 08:24:26.255389 12987 solver.cpp:244] Train net output #2: prob = 0.574061 (* 1 = 0.574061 loss) +I0116 08:24:26.505317 12987 
sgd_solver.cpp:106] Iteration 745000, lr = 0.0001 +I0116 08:33:43.671115 12987 solver.cpp:228] Iteration 746000, loss = 0.483817 +I0116 08:33:43.671416 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 08:33:43.671428 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 08:33:43.671447 12987 solver.cpp:244] Train net output #2: prob = 0.619766 (* 1 = 0.619766 loss) +I0116 08:33:43.925329 12987 sgd_solver.cpp:106] Iteration 746000, lr = 0.0001 +I0116 08:43:02.900254 12987 solver.cpp:228] Iteration 747000, loss = 0.462328 +I0116 08:43:02.900604 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 08:43:02.900631 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 08:43:02.900663 12987 solver.cpp:244] Train net output #2: prob = 0.146569 (* 1 = 0.146569 loss) +I0116 08:43:03.164019 12987 sgd_solver.cpp:106] Iteration 747000, lr = 0.0001 +I0116 08:52:23.317441 12987 solver.cpp:228] Iteration 748000, loss = 0.46324 +I0116 08:52:23.317746 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 08:52:23.317761 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 08:52:23.317780 12987 solver.cpp:244] Train net output #2: prob = 0.356275 (* 1 = 0.356275 loss) +I0116 08:52:23.577009 12987 sgd_solver.cpp:106] Iteration 748000, lr = 0.0001 +I0116 09:01:42.170807 12987 solver.cpp:228] Iteration 749000, loss = 0.453451 +I0116 09:01:42.173391 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 09:01:42.173426 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 09:01:42.173465 12987 solver.cpp:244] Train net output #2: prob = 0.291792 (* 1 = 0.291792 loss) +I0116 09:01:42.475335 12987 sgd_solver.cpp:106] Iteration 749000, lr = 0.0001 +I0116 09:10:45.460990 12987 solver.cpp:454] Snapshotting to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_750000.caffemodel +I0116 09:10:49.829478 12987 sgd_solver.cpp:273] Snapshotting solver state to binary proto file /home/heyihui/ceph/resnet-imagenet-caffe/resnet_32/snapshot/resnet_32_iter_750000.solverstate +I0116 09:10:50.266604 12987 solver.cpp:228] Iteration 750000, loss = 0.442246 +I0116 09:10:50.266651 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 09:10:50.266661 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 09:10:50.266672 12987 solver.cpp:244] Train net output #2: prob = 0.427658 (* 1 = 0.427658 loss) +I0116 09:10:50.545303 12987 sgd_solver.cpp:106] Iteration 750000, lr = 0.0001 +I0116 09:19:38.279213 12987 solver.cpp:228] Iteration 751000, loss = 0.485426 +I0116 09:19:38.279490 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0116 09:19:38.279525 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 09:19:38.279544 12987 solver.cpp:244] Train net output #2: prob = 0.144066 (* 1 = 0.144066 loss) +I0116 09:19:38.533370 12987 sgd_solver.cpp:106] Iteration 751000, lr = 0.0001 +I0116 09:28:24.863447 12987 solver.cpp:228] Iteration 752000, loss = 0.499376 +I0116 09:28:24.863729 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 09:28:24.863759 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 09:28:24.863775 12987 solver.cpp:244] Train net output #2: prob = 0.808888 (* 1 = 0.808888 loss) +I0116 09:28:25.115854 12987 sgd_solver.cpp:106] Iteration 752000, lr = 0.0001 +I0116 09:37:10.100821 12987 solver.cpp:228] Iteration 753000, loss = 0.492518 +I0116 09:37:10.101557 12987 solver.cpp:244] Train 
net output #0: accuracy@1 = 0.8125 +I0116 09:37:10.101568 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 09:37:10.101580 12987 solver.cpp:244] Train net output #2: prob = 0.657415 (* 1 = 0.657415 loss) +I0116 09:37:10.356238 12987 sgd_solver.cpp:106] Iteration 753000, lr = 0.0001 +I0116 09:45:57.012003 12987 solver.cpp:228] Iteration 754000, loss = 0.504447 +I0116 09:45:57.013126 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.8125 +I0116 09:45:57.013136 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 09:45:57.013151 12987 solver.cpp:244] Train net output #2: prob = 0.475494 (* 1 = 0.475494 loss) +I0116 09:45:57.267081 12987 sgd_solver.cpp:106] Iteration 754000, lr = 0.0001 +I0116 09:54:43.735949 12987 solver.cpp:228] Iteration 755000, loss = 0.49363 +I0116 09:54:43.736169 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 09:54:43.736179 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 09:54:43.736192 12987 solver.cpp:244] Train net output #2: prob = 0.559306 (* 1 = 0.559306 loss) +I0116 09:54:43.991713 12987 sgd_solver.cpp:106] Iteration 755000, lr = 0.0001 +I0116 10:03:30.522609 12987 solver.cpp:228] Iteration 756000, loss = 0.481566 +I0116 10:03:30.522889 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.9375 +I0116 10:03:30.522929 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 10:03:30.522956 12987 solver.cpp:244] Train net output #2: prob = 0.304712 (* 1 = 0.304712 loss) +I0116 10:03:30.770586 12987 sgd_solver.cpp:106] Iteration 756000, lr = 0.0001 +I0116 10:12:17.132488 12987 solver.cpp:228] Iteration 757000, loss = 0.462115 +I0116 10:12:17.132791 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.90625 +I0116 10:12:17.132841 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 10:12:17.132863 12987 solver.cpp:244] Train net output #2: prob = 0.273516 (* 1 = 0.273516 loss) +I0116 10:12:17.386342 12987 sgd_solver.cpp:106] Iteration 757000, lr = 0.0001 +I0116 10:21:03.678223 12987 solver.cpp:228] Iteration 758000, loss = 0.463793 +I0116 10:21:03.678431 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.875 +I0116 10:21:03.678442 12987 solver.cpp:244] Train net output #1: accuracy@5 = 0.96875 +I0116 10:21:03.678455 12987 solver.cpp:244] Train net output #2: prob = 0.332867 (* 1 = 0.332867 loss) +I0116 10:21:03.934134 12987 sgd_solver.cpp:106] Iteration 758000, lr = 0.0001 +I0116 10:29:50.468341 12987 solver.cpp:228] Iteration 759000, loss = 0.459418 +I0116 10:29:50.468583 12987 solver.cpp:244] Train net output #0: accuracy@1 = 1 +I0116 10:29:50.468593 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 10:29:50.468614 12987 solver.cpp:244] Train net output #2: prob = 0.183797 (* 1 = 0.183797 loss) +I0116 10:29:50.713209 12987 sgd_solver.cpp:106] Iteration 759000, lr = 0.0001 +I0116 10:38:37.041018 12987 solver.cpp:228] Iteration 760000, loss = 0.439904 +I0116 10:38:37.041309 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 10:38:37.041352 12987 solver.cpp:244] Train net output #1: accuracy@5 = 1 +I0116 10:38:37.041378 12987 solver.cpp:244] Train net output #2: prob = 0.391513 (* 1 = 0.391513 loss) +I0116 10:38:37.295380 12987 sgd_solver.cpp:106] Iteration 760000, lr = 0.0001 +I0116 10:47:28.399267 12987 solver.cpp:228] Iteration 761000, loss = 0.486013 +I0116 10:47:28.399617 12987 solver.cpp:244] Train net output #0: accuracy@1 = 0.84375 +I0116 10:47:28.399633 12987 solver.cpp:244] Train net output #1: 
accuracy@5 = 1 +I0116 10:47:28.399649 12987 solver.cpp:244] Train net output #2: prob = 0.451694 (* 1 = 0.451694 loss) +I0116 10:47:28.653844 12987 sgd_solver.cpp:106] Iteration 761000, lr = 0.0001 
diff --git a/resnet_32/loss.png b/resnet_32/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..b07cb713cd39aecbaa743b730282e422c4cd10b0 GIT binary patch literal 36394 
[base85-encoded binary payload for loss.png (36394-byte PNG) omitted] 
literal 0 HcmV?d00001 
diff --git a/resnet_32/resnet_32.prototxt b/resnet_32/resnet_32.prototxt index f043cbd..13447b6 100644 --- a/resnet_32/resnet_32.prototxt +++ b/resnet_32/resnet_32.prototxt @@ -56,7 +56,7 @@ layer { } bias_filler { type: "constant" - value: 0.2 + value: 0 } } } @@ -114,10 +114,6 @@ layer { weight_filler {
type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -155,10 +151,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -203,10 +195,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -251,10 +239,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -307,10 +291,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -355,10 +335,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -403,10 +379,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -459,10 +431,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -507,10 +475,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -555,10 +519,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -611,10 +571,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -652,10 +608,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -700,10 +652,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -748,10 +696,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -804,10 +748,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -852,10 +792,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -900,10 +836,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -956,10 +888,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1004,10 +932,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1052,10 +976,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1108,10 +1028,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1156,10 +1072,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1204,10 +1116,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1260,10 +1168,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1301,10 +1205,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1349,10 +1249,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1397,10 +1293,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1453,10 +1345,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1501,10 +1389,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1549,10 +1433,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" - value: 0.2 - } } } @@ -1605,10 +1485,6 @@ layer { weight_filler { type: "msra" } - bias_filler { - type: "constant" 
-      value: 0.2
-    }
   }
 }
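
All of the resnet_32.prototxt hunks above make the same change: the explicit bias_filler { type: "constant" value: 0.2 } block is removed from every convolution, and the first convolution's bias initialization is changed from 0.2 to 0. Caffe's default filler is a constant 0, so the convolutions keep their bias term (bias_term defaults to true) but now start from a zero bias, which matches common practice when each convolution is immediately followed by BatchNorm/Scale layers, since the learned per-channel shift there absorbs any constant bias. A minimal sketch of the resulting block is shown below; it assumes the usual Convolution + BatchNorm + Scale + ReLU ordering of Caffe ResNet prototxts, and the layer names are illustrative, not taken from this patch.

layer {
  name: "conv_example"          # illustrative name, not from the patch
  type: "Convolution"
  bottom: "data"
  top: "conv_example"
  convolution_param {
    num_output: 64
    kernel_size: 3
    pad: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    # no bias_filler: Caffe's default (constant 0) applies, so the bias starts at zero
  }
}
layer {
  name: "conv_example_bn"
  type: "BatchNorm"
  bottom: "conv_example"
  top: "conv_example"
}
layer {
  name: "conv_example_scale"
  type: "Scale"
  bottom: "conv_example"
  top: "conv_example"
  scale_param {
    bias_term: true             # learned per-channel scale and shift after BN
  }
}
layer {
  name: "conv_example_relu"
  type: "ReLU"
  bottom: "conv_example"
  top: "conv_example"
}

Setting bias_term: false on the convolution would drop the bias blob entirely; this patch does not go that far, it only changes the initialization.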