Modify Dropout to allow backward pass in TEST phase
author	Sergio <sguada@gmail.com>
Sat, 28 Jun 2014 01:54:08 +0000 (18:54 -0700)
committer	Sergio <sguada@gmail.com>
Sat, 28 Jun 2014 01:54:08 +0000 (18:54 -0700)
Conflicts:
src/caffe/layers/dropout_layer.cpp
src/caffe/layers/dropout_layer.cu

src/caffe/layers/dropout_layer.cpp
src/caffe/layers/dropout_layer.cu

index e9a1a52..a3501bf 100644 (file)
@@ -49,14 +49,17 @@ template <typename Dtype>
 void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     vector<Blob<Dtype>*>* bottom) {
-  CHECK(Caffe::phase() == Caffe::TRAIN);
   if (propagate_down[0]) {
     const Dtype* top_diff = top[0]->cpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
-    const unsigned int* mask = rand_vec_->cpu_data();
-    const int count = (*bottom)[0]->count();
-    for (int i = 0; i < count; ++i) {
-      bottom_diff[i] = top_diff[i] * mask[i] * scale_;
+    if (Caffe::phase() == Caffe::TRAIN) {
+      const unsigned int* mask = rand_vec_->cpu_data();
+      const int count = (*bottom)[0]->count();
+      for (int i = 0; i < count; ++i) {
+        bottom_diff[i] = top_diff[i] * mask[i] * scale_;
+      }
+    } else {
+      caffe_copy(top[0]->count(), top_diff, bottom_diff);
     }
   }
 }
index 2c72264..0040d26 100644 (file)
@@ -58,17 +58,20 @@ template <typename Dtype>
 void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     vector<Blob<Dtype>*>* bottom) {
-  CHECK(Caffe::phase() == Caffe::TRAIN);
   if (propagate_down[0]) {
     const Dtype* top_diff = top[0]->gpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
-    const unsigned int* mask =
-        static_cast<const unsigned int*>(rand_vec_->gpu_data());
-    const int count = (*bottom)[0]->count();
-    // NOLINT_NEXT_LINE(whitespace/operators)
-    DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
-        count, top_diff, mask, uint_thres_, scale_, bottom_diff);
-    CUDA_POST_KERNEL_CHECK;
+    if (Caffe::phase() == Caffe::TRAIN) {
+      const unsigned int* mask =
+          static_cast<const unsigned int*>(rand_vec_->gpu_data());
+      const int count = (*bottom)[0]->count();
+      // NOLINT_NEXT_LINE(whitespace/operators)
+      DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+          count, top_diff, mask, uint_thres_, scale_, bottom_diff);
+      CUDA_POST_KERNEL_CHECK;
+    } else {
+      caffe_gpu_copy(top[0]->count(), top_diff, bottom_diff);
+    }
   }
 }