--- /dev/null
+name: "CIFAR10_18pct"\r
+layers {\r
+ layer {\r
+ name: "cifar"\r
+ type: "data"\r
+ source: "cifar10_db/cifar-train-leveldb"\r
+ meanfile: "mean.binaryproto"\r
+ batchsize: 100\r
+ }\r
+ top: "data"\r
+ top: "label"\r
+}\r
+# ------------------------ layer 1 -----------------------------\r
+layers {\r
+ layer {\r
+ name: "pad1"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "data"\r
+ top: "pad1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv1"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.0001\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ }\r
+ bottom: "pad1"\r
+ top: "conv1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool1"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: MAX\r
+ }\r
+ bottom: "conv1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu1"\r
+ type: "relu"\r
+ }\r
+ bottom: "pool1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "norm1"\r
+ type: "lrn"\r
+ local_size: 3\r
+ alpha: 0.00005\r
+ beta: 0.75\r
+ }\r
+ bottom: "pool1"\r
+ top: "norm1"\r
+}\r
+# --------------------------- layer 2 ------------------------\r
+layers {\r
+ layer {\r
+ name: "pad2"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "norm1"\r
+ top: "pad2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv2"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ }\r
+ bottom: "pad2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu2"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool2"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv2"\r
+ top: "pool2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "norm2"\r
+ type: "lrn"\r
+ local_size: 3\r
+ alpha: 0.00005\r
+ beta: 0.75\r
+ }\r
+ bottom: "pool2"\r
+ top: "norm2"\r
+}\r
+#-----------------------layer 3-------------------------\r
+layers {\r
+ layer {\r
+ name: "pad3"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "norm2"\r
+ top: "pad3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv3"\r
+ type: "conv"\r
+ num_output: 64\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ }\r
+ bottom: "pad3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu3"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool3"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv3"\r
+ top: "pool3"\r
+}\r
+#--------------------------layer 4------------------------\r
+layers {\r
+ layer {\r
+ name: "ip1"\r
+ type: "innerproduct"\r
+ num_output: 10\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ weight_decay: 250.\r
+ weight_decay: 0.\r
+ }\r
+ bottom: "pool3"\r
+ top: "ip1"\r
+}\r
+#-----------------------output------------------------\r
+layers {\r
+ layer {\r
+ name: "loss"\r
+ type: "softmax_loss"\r
+ }\r
+ bottom: "ip1"\r
+ bottom: "label"\r
+}\r
--- /dev/null
+# reduce learning rate after 120 epochs (60000 iters) by factor of 10
+# then another factor of 10 after 10 more epochs (5000 iters)
+
+# The training protocol buffer definition
+train_net: "cifar10_18pct.prototxt"
+# The testing protocol buffer definition
+test_net: "cifar10_18pct_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of CIFAR-10, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 500 training iterations.
+test_interval: 500
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.001
+momentum: 0.9
+weight_decay: 0.004
+# The learning rate policy
+lr_policy: "fixed"
+# Display every 100 iterations
+display: 100
+# The maximum number of iterations
+max_iter: 100000
+# snapshot intermediate results
+snapshot: 5000
+snapshot_prefix: "cifar10_18pct"
+# solver mode: 0 for CPU and 1 for GPU
+solver_mode: 1
+
+device_id: 0
--- /dev/null
+name: "CIFAR10_18pct_test"\r
+layers {\r
+ layer {\r
+ name: "cifar"\r
+ type: "data"\r
+ source: "cifar10_db/cifar-test-leveldb"\r
+ meanfile: "mean.binaryproto"\r
+ batchsize: 100\r
+ }\r
+ top: "data"\r
+ top: "label"\r
+}\r
+# ------------------------ layer 1 -----------------------------\r
+layers {\r
+ layer {\r
+ name: "pad1"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "data"\r
+ top: "pad1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv1"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.0001\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ }\r
+ bottom: "pad1"\r
+ top: "conv1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool1"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: MAX\r
+ }\r
+ bottom: "conv1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu1"\r
+ type: "relu"\r
+ }\r
+ bottom: "pool1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "norm1"\r
+ type: "lrn"\r
+ local_size: 3\r
+ alpha: 0.00005\r
+ beta: 0.75\r
+ }\r
+ bottom: "pool1"\r
+ top: "norm1"\r
+}\r
+# --------------------------- layer 2 ------------------------\r
+layers {\r
+ layer {\r
+ name: "pad2"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "norm1"\r
+ top: "pad2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv2"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ }\r
+ bottom: "pad2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu2"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool2"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv2"\r
+ top: "pool2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "norm2"\r
+ type: "lrn"\r
+ local_size: 3\r
+ alpha: 0.00005\r
+ beta: 0.75\r
+ }\r
+ bottom: "pool2"\r
+ top: "norm2"\r
+}\r
+#-----------------------layer 3-------------------------\r
+layers {\r
+ layer {\r
+ name: "pad3"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "norm2"\r
+ top: "pad3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv3"\r
+ type: "conv"\r
+ num_output: 64\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ }\r
+ bottom: "pad3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu3"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool3"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv3"\r
+ top: "pool3"\r
+}\r
+#--------------------------layer 4------------------------\r
+layers {\r
+ layer {\r
+ name: "ip1"\r
+ type: "innerproduct"\r
+ num_output: 10\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.\r
+ blobs_lr: 2.\r
+ weight_decay: 250.\r
+ weight_decay: 0.\r
+ }\r
+ bottom: "pool3"\r
+ top: "ip1"\r
+}\r
+#-----------------------output------------------------\r
+layers {\r
+ layer {\r
+ name: "prob"\r
+ type: "softmax"\r
+ }\r
+ bottom: "ip1"\r
+ top: "prob"\r
+}\r
+layers {\r
+ layer {\r
+ name: "accuracy"\r
+ type: "accuracy"\r
+ }\r
+ bottom: "prob"\r
+ bottom: "label"\r
+ top: "accuracy"\r
+}\r
--- /dev/null
+# train 80sec config\r
+name: "CIFAR10_80sec"\r
+layers {\r
+ layer {\r
+ name: "cifar"\r
+ type: "data"\r
+ source: "cifar10_db/cifar-train-leveldb"\r
+ meanfile: "mean.binaryproto"\r
+ batchsize: 100\r
+ }\r
+ top: "data"\r
+ top: "label"\r
+}\r
+# ------------------------ layer 1 -----------------------------\r
+layers {\r
+ layer {\r
+ name: "pad1"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "data"\r
+ top: "pad1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv1"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.0001\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad1"\r
+ top: "conv1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool1"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: MAX\r
+ }\r
+ bottom: "conv1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu1"\r
+ type: "relu"\r
+ }\r
+ bottom: "pool1"\r
+ top: "pool1"\r
+}\r
+# --------------------------- layer 2 ------------------------\r
+layers {\r
+ layer {\r
+ name: "pad2"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "pool1"\r
+ top: "pad2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv2"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu2"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool2"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv2"\r
+ top: "pool2"\r
+}\r
+#-----------------------layer 3-------------------------\r
+layers {\r
+ layer {\r
+ name: "pad3"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "pool2"\r
+ top: "pad3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv3"\r
+ type: "conv"\r
+ num_output: 64\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu3"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool3"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv3"\r
+ top: "pool3"\r
+}\r
+#--------------------------layer 4------------------------\r
+layers {\r
+ layer {\r
+ name: "ip1"\r
+ type: "innerproduct"\r
+ num_output: 64 \r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.1\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pool3"\r
+ top: "ip1"\r
+}\r
+#--------------------------layer 5------------------------\r
+layers {\r
+ layer {\r
+ name: "ip2"\r
+ type: "innerproduct"\r
+ num_output: 10\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.1\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "ip1"\r
+ top: "ip2"\r
+}\r
+#-----------------------output------------------------\r
+layers {\r
+ layer {\r
+ name: "loss"\r
+ type: "softmax_loss"\r
+ }\r
+ bottom: "ip2"\r
+ bottom: "label"\r
+}\r
--- /dev/null
+# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10
+
+# The training protocol buffer definition
+train_net: "cifar10_80sec.prototxt"
+# The testing protocol buffer definition
+test_net: "cifar10_80sec_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of CIFAR-10, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 500 training iterations.
+test_interval: 500
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.001
+momentum: 0.9
+weight_decay: 0.004
+# The learning rate policy
+lr_policy: "fixed"
+# Display every 100 iterations
+display: 100
+# The maximum number of iterations
+max_iter: 100000
+# snapshot intermediate results
+snapshot: 4000
+snapshot_prefix: "cifar10_80sec"
+# solver mode: 0 for CPU and 1 for GPU
+solver_mode: 1
+
+device_id: 1
--- /dev/null
+# test 80sec config\r
+name: "CIFAR10_80sec_test"\r
+layers {\r
+ layer {\r
+ name: "cifar"\r
+ type: "data"\r
+ source: "cifar10_db/cifar-test-leveldb"\r
+ meanfile: "mean.binaryproto"\r
+ batchsize: 100\r
+ }\r
+ top: "data"\r
+ top: "label"\r
+}\r
+# ------------------------ layer 1 -----------------------------\r
+layers {\r
+ layer {\r
+ name: "pad1"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "data"\r
+ top: "pad1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv1"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.0001\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad1"\r
+ top: "conv1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool1"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: MAX\r
+ }\r
+ bottom: "conv1"\r
+ top: "pool1"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu1"\r
+ type: "relu"\r
+ }\r
+ bottom: "pool1"\r
+ top: "pool1"\r
+}\r
+# --------------------------- layer 2 ------------------------\r
+layers {\r
+ layer {\r
+ name: "pad2"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "pool1"\r
+ top: "pad2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv2"\r
+ type: "conv"\r
+ num_output: 32\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu2"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv2"\r
+ top: "conv2"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool2"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv2"\r
+ top: "pool2"\r
+}\r
+#-----------------------layer 3-------------------------\r
+layers {\r
+ layer {\r
+ name: "pad3"\r
+ type: "padding"\r
+ pad: 2\r
+ }\r
+ bottom: "pool2"\r
+ top: "pad3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "conv3"\r
+ type: "conv"\r
+ num_output: 64\r
+ kernelsize: 5\r
+ stride: 1\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.01\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pad3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "relu3"\r
+ type: "relu"\r
+ }\r
+ bottom: "conv3"\r
+ top: "conv3"\r
+}\r
+layers {\r
+ layer {\r
+ name: "pool3"\r
+ type: "pool"\r
+ kernelsize: 3\r
+ stride: 2\r
+ pool: AVE\r
+ }\r
+ bottom: "conv3"\r
+ top: "pool3"\r
+}\r
+#--------------------------layer 4------------------------\r
+layers {\r
+ layer {\r
+ name: "ip1"\r
+ type: "innerproduct"\r
+ num_output: 64 \r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.1\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "pool3"\r
+ top: "ip1"\r
+}\r
+#--------------------------layer 5------------------------\r
+layers {\r
+ layer {\r
+ name: "ip2"\r
+ type: "innerproduct"\r
+ num_output: 10\r
+ weight_filler {\r
+ type: "gaussian"\r
+ std: 0.1\r
+ }\r
+ bias_filler {\r
+ type: "constant"\r
+ }\r
+ blobs_lr: 1.0\r
+ blobs_lr: 2.0\r
+ }\r
+ bottom: "ip1"\r
+ top: "ip2"\r
+}\r
+#-----------------------output------------------------\r
+layers {\r
+ layer {\r
+ name: "prob"\r
+ type: "softmax"\r
+ }\r
+ bottom: "ip2"\r
+ top: "prob"\r
+}\r
+layers {\r
+ layer {\r
+ name: "accuracy"\r
+ type: "accuracy"\r
+ }\r
+ bottom: "prob"\r
+ bottom: "label"\r
+ top: "accuracy"\r
+}\r