bugfix for lrn
author Yangqing Jia <jiayq84@gmail.com>
Thu, 19 Sep 2013 22:41:40 +0000 (15:41 -0700)
committer Yangqing Jia <jiayq84@gmail.com>
Thu, 19 Sep 2013 22:41:40 +0000 (15:41 -0700)
src/caffeine/layers/lrn_layer.cu
src/caffeine/test/test_lrn_layer.cpp

index 53e1ae7..2c62136 100644 (file)
@@ -93,12 +93,14 @@ Dtype LRNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
   // go through individual data
   int inverse_pre_pad = size_ - (size_ + 1) / 2;
   for (int n = 0; n < num_; ++n) {
+    int block_offset = scale_.offset(n);
     // first, compute diff_i * y_i / s_i
-    caffeine_mul<Dtype>(scale_.count(), top_diff, top_data,
+    caffeine_mul<Dtype>(channels_ * height_ * width_,
+        top_diff + block_offset, top_data + block_offset,
         padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad));
-    caffeine_div<Dtype>(scale_.count(),
+    caffeine_div<Dtype>(channels_ * height_ * width_,
         padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad),
-        scale_data,
+        scale_data + block_offset,
         padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad));
     // Now, compute the accumulated ratios and the bottom diff
     memset(accum_ratio_data, 0, sizeof(Dtype) * accum_ratio.count());
index 49a8ee4..c850eef 100644 (file)
@@ -109,12 +109,12 @@ TYPED_TEST(LRNLayerTest, TestCPUGradient) {
   LRNLayer<TypeParam> layer(layer_param);
   Caffeine::set_mode(Caffeine::CPU);
   // when testing the GPU gradient, let's do a small shape.
-  this->blob_bottom_->Reshape(1, 7, 1, 1);
+  this->blob_bottom_->Reshape(2, 7, 3, 3);
   FillerParameter filler_param;
   GaussianFiller<TypeParam> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  checker.CheckGradient(layer, this->blob_bottom_vec_, this->blob_top_vec_);
+  checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
 }
 
 }