10x learning rate for fine tuning makes a big difference
author Ross Girshick <rbg@eecs.berkeley.edu>
Sun, 12 Jan 2014 15:54:35 +0000 (07:54 -0800)
committer Evan Shelhamer <shelhamer@imaginarynumber.net>
Thu, 20 Mar 2014 02:07:23 +0000 (19:07 -0700)
models/pascal_finetune.prototxt
models/pascal_finetune_solver.prototxt
models/pascal_finetune_val.prototxt

index 229898e..d2a33f7 100644 (file)
@@ -3,8 +3,8 @@ layers {
   layer {
     name: "data"
     type: "window_data"
-    source: "/work5/rbg/convnet-selective-search/selective-search-data/window_file_2007_trainval.txt"
-    meanfile: "/home/rbg/working/caffe-rbg/data/ilsvrc2012_mean.binaryproto"
+    source: "/work4/rbg/convnet-selective-search/selective-search-data/window_file_2007_trainval.txt"
+    meanfile: "/home/eecs/rbg/working/caffe-rbg/data/ilsvrc2012_mean.binaryproto"
     batchsize: 128
     cropsize: 227
     context_pad: 16
@@ -350,8 +350,8 @@ layers {
       type: "constant"
       value: 0
     }
-    blobs_lr: 100.
-    blobs_lr: 200.
+    blobs_lr: 10.
+    blobs_lr: 20.
     weight_decay: 1.
     weight_decay: 0.
     can_clobber: false
index f53a1d5..8c46475 100644 (file)
@@ -2,7 +2,7 @@ train_net: "examples/pascal_finetune.prototxt"
 test_net: "examples/pascal_finetune_val.prototxt"
 test_iter: 100
 test_interval: 1000
-base_lr: 0.0001
+base_lr: 0.001
 lr_policy: "step"
 gamma: 0.1
 stepsize: 20000
index 53aab09..18a680f 100644 (file)
@@ -3,8 +3,8 @@ layers {
   layer {
     name: "data"
     type: "window_data"
-    source: "/work5/rbg/convnet-selective-search/selective-search-data/window_file_2007_test.txt"
-    meanfile: "/home/rbg/working/caffe-rbg/data/ilsvrc2012_mean.binaryproto"
+    source: "/work4/rbg/convnet-selective-search/selective-search-data/window_file_2007_test.txt"
+    meanfile: "/home/eecs/rbg/working/caffe-rbg/data/ilsvrc2012_mean.binaryproto"
     batchsize: 128
     cropsize: 227
     context_pad: 16
@@ -350,8 +350,8 @@ layers {
       type: "constant"
       value: 0
     }
-    blobs_lr: 100.
-    blobs_lr: 200.
+    blobs_lr: 10.
+    blobs_lr: 20.
     weight_decay: 1.
     weight_decay: 0.
   }