Adapted to V1 proto definition; tests don't pass
author Sergio <sguada@gmail.com>
Mon, 14 Apr 2014 04:03:06 +0000 (21:03 -0700)
committer Jeff Donahue <jeff.donahue@gmail.com>
Sat, 24 May 2014 22:15:11 +0000 (15:15 -0700)
src/caffe/layers/pooling_layer.cpp
src/caffe/layers/pooling_layer.cu
src/caffe/test/test_maxpool_dropout_layers.cpp

index e052f78..97506cc 100644 (file)
@@ -38,7 +38,7 @@ void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   (*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
       pooled_width_);
   // If max pooling, we will initialize the vector index part.
-  if (this->layer_param_.pool() == LayerParameter_PoolMethod_MAX) {
+  if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) {
     max_idx_.reset(new SyncedMemory((*top)[0]->count() * sizeof(int)));
   }
   // If stochastic pooling, we will initialize the random index part.
index 3b5b8ee..3bdb562 100644 (file)
@@ -208,9 +208,9 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
     int h = (index / width) % height;
     int c = (index / width / height) % channels;
     int n = index / width / height / channels;
-    int phstart = (h < ksize) ? 0 : (h - ksize) / stride + 1;
+    int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
     int phend = min(h / stride + 1, pooled_height);
-    int pwstart = (w < ksize) ? 0 : (w - ksize) / stride + 1;
+    int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
     int pwend = min(w / stride + 1, pooled_width);
     Dtype gradient = 0;
     top_diff += (n * channels + c) * pooled_height * pooled_width;
@@ -361,7 +361,7 @@ void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     // Since we have the mask we only need count top_diff
     count = top[0]->count(); 
     // NOLINT_NEXT_LINE(whitespace/operators)
-    caffe_gpu_memset(count,Dtype(0.),bottom_diff);
+    caffe_gpu_set(count,Dtype(0.),bottom_diff);
     MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, top[0]->num(), channels_,
         height_, width_, pooled_height_, pooled_width_,
index 7468d8e..9d30263 100644 (file)
@@ -43,10 +43,11 @@ TYPED_TEST_CASE(MaxPoolingDropoutTest, Dtypes);
 
 TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  PoolingLayer<TypeParam> max_layer(layer_param);
+  max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   DropoutLayer<TypeParam> dropout_layer(layer_param);
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
@@ -57,10 +58,11 @@ TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
 
 
 TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
-  LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
   Caffe::set_mode(Caffe::CPU);
+  LayerParameter layer_param;
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -75,7 +77,7 @@ TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
   sum = 0.;
-  TypeParam scale = 1. / (1. - layer_param.dropout_ratio());
+  TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
        sum += top_data[i];
@@ -85,10 +87,11 @@ TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
 }
 
 TYPED_TEST(MaxPoolingDropoutTest, GPUForward) {
-  LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
   Caffe::set_mode(Caffe::GPU);
+  LayerParameter layer_param;
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -103,7 +106,7 @@ TYPED_TEST(MaxPoolingDropoutTest, GPUForward) {
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
   sum = 0.;
-  TypeParam scale = 1. / (1. - layer_param.dropout_ratio());
+  TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
        sum += top_data[i];
@@ -113,11 +116,12 @@ TYPED_TEST(MaxPoolingDropoutTest, GPUForward) {
 }
 
 TYPED_TEST(MaxPoolingDropoutTest, CPUBackward) {
-  LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
   Caffe::set_mode(Caffe::CPU);
   Caffe::set_phase(Caffe::TRAIN);
+  LayerParameter layer_param;
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -146,11 +150,12 @@ TYPED_TEST(MaxPoolingDropoutTest, CPUBackward) {
 }
 
 TYPED_TEST(MaxPoolingDropoutTest, GPUBackward) {
-  LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
   Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TRAIN);
+  LayerParameter layer_param;
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));