add cifar example using LRN_MAP (just like the cuda-convnet layers-18pct architecture) instead of LRN

author    Jeff Donahue <jeff.donahue@gmail.com>
          Sat, 29 Mar 2014 07:51:49 +0000 (00:51 -0700)
committer Jeff Donahue <jeff.donahue@gmail.com>
          Tue, 8 Apr 2014 18:36:18 +0000 (11:36 -0700)
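
The new nets mirror the existing cifar10_full example, swapping its LRN layers for LRN_MAP. Going by the cuda-convnet layers-18pct reference, LRN_MAP presumably normalizes within each feature map over a local spatial neighborhood, where plain LRN normalizes across channels. As it appears in the train/test nets below, only the layer type changes; the lrn_param settings stay the same:

    layers {
      name: "norm1"
      type: LRN_MAP   # same-map normalization (assumption); plain LRN normalizes across channels
      bottom: "pool1"
      top: "norm1"
      lrn_param {
        local_size: 3   # assumption: a 3x3 spatial neighborhood for LRN_MAP
        alpha: 5e-05
        beta: 0.75
      }
    }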

examples/cifar10/cifar10_full_lrn_map_solver.prototxt [new file with mode: 0644]
examples/cifar10/cifar10_full_lrn_map_solver_lr1.prototxt [new file with mode: 0644]
examples/cifar10/cifar10_full_lrn_map_solver_lr2.prototxt [new file with mode: 0644]
examples/cifar10/cifar10_full_lrn_map_test.prototxt [new file with mode: 0644]
examples/cifar10/cifar10_full_lrn_map_train.prototxt [new file with mode: 0644]
examples/cifar10/train_full_lrn_map.sh [new file with mode: 0755]

diff --git a/examples/cifar10/cifar10_full_lrn_map_solver.prototxt b/examples/cifar10/cifar10_full_lrn_map_solver.prototxt
new file mode 100644 (file)
index 0000000..492912a
--- /dev/null
@@ -0,0 +1,28 @@
+# reduce learning rate after 120 epochs (60000 iters) by a factor of 10
+# then another factor of 10 after 10 more epochs (5000 iters)
+
+# The training protocol buffer definition
+train_net: "cifar10_full_lrn_map_train.prototxt"
+# The testing protocol buffer definition
+test_net: "cifar10_full_lrn_map_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 1000 training iterations.
+test_interval: 1000
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.001
+momentum: 0.9
+weight_decay: 0.004
+# The learning rate policy
+lr_policy: "fixed"
+# Display every 200 iterations
+display: 200
+# The maximum number of iterations
+max_iter: 60000
+# snapshot intermediate results
+snapshot: 10000
+snapshot_prefix: "cifar10_full_lrn_map"
+# solver mode: 0 for CPU and 1 for GPU
+solver_mode: 1
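
For reference, the epoch-to-iteration conversion in the solver comments follows from the 50,000-image CIFAR-10 training set and the batch_size of 100 used in the train net:

    50000 images / 100 per batch = 500 iters per epoch
    120 epochs * 500 = 60000 iters   (max_iter of this solver, base_lr 0.001)
     10 epochs * 500 =  5000 iters   (each follow-up stage in the lr1/lr2 solvers)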
diff --git a/examples/cifar10/cifar10_full_lrn_map_solver_lr1.prototxt b/examples/cifar10/cifar10_full_lrn_map_solver_lr1.prototxt
new file mode 100644 (file)
index 0000000..c51a4e6
--- /dev/null
@@ -0,0 +1,28 @@
+# reduce learning rate after 120 epochs (60000 iters) by a factor of 10
+# then another factor of 10 after 10 more epochs (5000 iters)
+
+# The training protocol buffer definition
+train_net: "cifar10_full_lrn_map_train.prototxt"
+# The testing protocol buffer definition
+test_net: "cifar10_full_lrn_map_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 1000 training iterations.
+test_interval: 1000
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.0001
+momentum: 0.9
+weight_decay: 0.004
+# The learning rate policy
+lr_policy: "fixed"
+# Display every 200 iterations
+display: 200
+# The maximum number of iterations
+max_iter: 65000
+# snapshot intermediate results
+snapshot: 5000
+snapshot_prefix: "cifar10_full_lrn_map"
+# solver mode: 0 for CPU and 1 for GPU
+solver_mode: 1
diff --git a/examples/cifar10/cifar10_full_lrn_map_solver_lr2.prototxt b/examples/cifar10/cifar10_full_lrn_map_solver_lr2.prototxt
new file mode 100644 (file)
index 0000000..d15b115
--- /dev/null
@@ -0,0 +1,28 @@
+# reduce learning rate after 120 epochs (60000 iters) by a factor of 10
+# then another factor of 10 after 10 more epochs (5000 iters)
+
+# The training protocol buffer definition
+train_net: "cifar10_full_lrn_map_train.prototxt"
+# The testing protocol buffer definition
+test_net: "cifar10_full_lrn_map_test.prototxt"
+# test_iter specifies how many forward passes the test should carry out.
+# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
+# covering the full 10,000 testing images.
+test_iter: 100
+# Carry out testing every 1000 training iterations.
+test_interval: 1000
+# The base learning rate, momentum and the weight decay of the network.
+base_lr: 0.00001
+momentum: 0.9
+weight_decay: 0.004
+# The learning rate policy
+lr_policy: "fixed"
+# Display every 200 iterations
+display: 200
+# The maximum number of iterations
+max_iter: 70000
+# snapshot intermediate results
+snapshot: 5000
+snapshot_prefix: "cifar10_full_lrn_map"
+# solver mode: 0 for CPU and 1 for GPU
+solver_mode: 1
diff --git a/examples/cifar10/cifar10_full_lrn_map_test.prototxt b/examples/cifar10/cifar10_full_lrn_map_test.prototxt
new file mode 100644 (file)
index 0000000..a8c81ed
--- /dev/null
@@ -0,0 +1,179 @@
+name: "CIFAR10_full_test"
+layers {
+  name: "cifar"
+  type: DATA
+  top: "data"
+  top: "label"
+  data_param {
+    source: "cifar10-leveldb/cifar-test-leveldb"
+    mean_file: "mean.binaryproto"
+    batch_size: 100
+  }
+}
+layers {
+  name: "conv1"
+  type: CONVOLUTION
+  bottom: "data"
+  top: "conv1"
+  blobs_lr: 1
+  blobs_lr: 2
+  convolution_param {
+    num_output: 32
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.0001
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "pool1"
+  type: POOLING
+  bottom: "conv1"
+  top: "pool1"
+  pooling_param {
+    pool: MAX
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "relu1"
+  type: RELU
+  bottom: "pool1"
+  top: "pool1"
+}
+layers {
+  name: "norm1"
+  type: LRN_MAP
+  bottom: "pool1"
+  top: "norm1"
+  lrn_param {
+    local_size: 3
+    alpha: 5e-05
+    beta: 0.75
+  }
+}
+layers {
+  name: "conv2"
+  type: CONVOLUTION
+  bottom: "norm1"
+  top: "conv2"
+  blobs_lr: 1
+  blobs_lr: 2
+  convolution_param {
+    num_output: 32
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "relu2"
+  type: RELU
+  bottom: "conv2"
+  top: "conv2"
+}
+layers {
+  name: "pool2"
+  type: POOLING
+  bottom: "conv2"
+  top: "pool2"
+  pooling_param {
+    pool: AVE
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "norm2"
+  type: LRN_MAP
+  bottom: "pool2"
+  top: "norm2"
+  lrn_param {
+    local_size: 3
+    alpha: 5e-05
+    beta: 0.75
+  }
+}
+layers {
+  name: "conv3"
+  type: CONVOLUTION
+  bottom: "norm2"
+  top: "conv3"
+  convolution_param {
+    num_output: 64
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "relu3"
+  type: RELU
+  bottom: "conv3"
+  top: "conv3"
+}
+layers {
+  name: "pool3"
+  type: POOLING
+  bottom: "conv3"
+  top: "pool3"
+  pooling_param {
+    pool: AVE
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "ip1"
+  type: INNER_PRODUCT
+  bottom: "pool3"
+  top: "ip1"
+  blobs_lr: 1
+  blobs_lr: 2
+  weight_decay: 250
+  weight_decay: 0
+  inner_product_param {
+    num_output: 10
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "prob"
+  type: SOFTMAX
+  bottom: "ip1"
+  top: "prob"
+}
+layers {
+  name: "accuracy"
+  type: ACCURACY
+  bottom: "prob"
+  bottom: "label"
+  top: "accuracy"
+}
diff --git a/examples/cifar10/cifar10_full_lrn_map_train.prototxt b/examples/cifar10/cifar10_full_lrn_map_train.prototxt
new file mode 100644 (file)
index 0000000..b9c436e
--- /dev/null
@@ -0,0 +1,172 @@
+name: "CIFAR10_full_train"
+layers {
+  name: "cifar"
+  type: DATA
+  top: "data"
+  top: "label"
+  data_param {
+    source: "cifar10-leveldb/cifar-train-leveldb"
+    mean_file: "mean.binaryproto"
+    batch_size: 100
+  }
+}
+layers {
+  name: "conv1"
+  type: CONVOLUTION
+  bottom: "data"
+  top: "conv1"
+  blobs_lr: 1
+  blobs_lr: 2
+  convolution_param {
+    num_output: 32
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.0001
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "pool1"
+  type: POOLING
+  bottom: "conv1"
+  top: "pool1"
+  pooling_param {
+    pool: MAX
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "relu1"
+  type: RELU
+  bottom: "pool1"
+  top: "pool1"
+}
+layers {
+  name: "norm1"
+  type: LRN_MAP
+  bottom: "pool1"
+  top: "norm1"
+  lrn_param {
+    local_size: 3
+    alpha: 5e-05
+    beta: 0.75
+  }
+}
+layers {
+  name: "conv2"
+  type: CONVOLUTION
+  bottom: "norm1"
+  top: "conv2"
+  blobs_lr: 1
+  blobs_lr: 2
+  convolution_param {
+    num_output: 32
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "relu2"
+  type: RELU
+  bottom: "conv2"
+  top: "conv2"
+}
+layers {
+  name: "pool2"
+  type: POOLING
+  bottom: "conv2"
+  top: "pool2"
+  pooling_param {
+    pool: AVE
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "norm2"
+  type: LRN_MAP
+  bottom: "pool2"
+  top: "norm2"
+  lrn_param {
+    local_size: 3
+    alpha: 5e-05
+    beta: 0.75
+  }
+}
+layers {
+  name: "conv3"
+  type: CONVOLUTION
+  bottom: "norm2"
+  top: "conv3"
+  convolution_param {
+    num_output: 64
+    pad: 2
+    kernel_size: 5
+    stride: 1
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "relu3"
+  type: RELU
+  bottom: "conv3"
+  top: "conv3"
+}
+layers {
+  name: "pool3"
+  type: POOLING
+  bottom: "conv3"
+  top: "pool3"
+  pooling_param {
+    pool: AVE
+    kernel_size: 3
+    stride: 2
+  }
+}
+layers {
+  name: "ip1"
+  type: INNER_PRODUCT
+  bottom: "pool3"
+  top: "ip1"
+  blobs_lr: 1
+  blobs_lr: 2
+  weight_decay: 250
+  weight_decay: 0
+  inner_product_param {
+    num_output: 10
+    weight_filler {
+      type: "gaussian"
+      std: 0.01
+    }
+    bias_filler {
+      type: "constant"
+    }
+  }
+}
+layers {
+  name: "loss"
+  type: SOFTMAX_LOSS
+  bottom: "ip1"
+  bottom: "label"
+}
diff --git a/examples/cifar10/train_full_lrn_map.sh b/examples/cifar10/train_full_lrn_map.sh
new file mode 100755 (executable)
index 0000000..a8f1ad6
--- /dev/null
@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+
+TOOLS=../../build/tools
+
+# initial training run from scratch (no solverstate exists to resume from yet)
+GLOG_logtostderr=1 $TOOLS/train_net.bin \
+    cifar10_full_lrn_map_solver.prototxt
+
+# reduce learning rate by a factor of 10
+GLOG_logtostderr=1 $TOOLS/train_net.bin \
+    cifar10_full_lrn_map_solver_lr1.prototxt \
+    cifar10_full_lrn_map_iter_60000.solverstate
+
+# reduce learning rate by a factor of 10
+GLOG_logtostderr=1 $TOOLS/train_net.bin \
+    cifar10_full_lrn_map_solver_lr2.prototxt \
+    cifar10_full_lrn_map_iter_65000.solverstate
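
To try the example, the script is run from examples/cifar10 so that TOOLS and the relative paths in the prototxts resolve; a minimal sketch, assuming the CIFAR-10 leveldbs and mean file referenced by the data layers have already been generated:

    cd examples/cifar10
    # assumes cifar10-leveldb/cifar-{train,test}-leveldb and mean.binaryproto exist
    ./train_full_lrn_map.sh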