Added tests for the Concat layer; all tests pass
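
Also fixes compile errors in concat_layer.cpp: loop index declarations,
const-qualified bottom data pointers, pointer arithmetic in the
caffe_copy/caffe_gpu_copy calls, and the concat_dim_ member name in the
FATAL log message. The new test_concat_layer.cpp covers setup along num
and channels, the CPU forward pass, and CPU/GPU gradient checks.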
author sguada <sguada@gmail.com>
Tue, 18 Feb 2014 02:16:38 +0000 (18:16 -0800)
committer Sergio Guadarrama <sguada@gmail.com>
Fri, 28 Feb 2014 02:48:22 +0000 (18:48 -0800)
src/caffe/layers/concat_layer.cpp
src/caffe/test/test_concat_layer.cpp [new file with mode: 0644]

index 5a9d44486c74d20bd6c9c766b7a2736e4d60a931..5c3d3481858112bd57642f00463274a4e8fd8ac9 100644 (file)
@@ -23,7 +23,7 @@ void ConcatLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   CHANNELS_ = bottom[0]->channels();
   HEIGHT_ = bottom[0]->height();
   WIDTH_ = bottom[0]->width();  
-  for (int i=1; i<bottom.size(), ++i) {
+  for (int i=1; i<bottom.size(); ++i) {
     COUNT_ += bottom[i]->count();
     if (concat_dim_==0) {
       NUM_ += bottom[i]->num();  
@@ -45,24 +45,25 @@ void ConcatLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
   Dtype* top_data = (*top)[0]->mutable_cpu_data();
   if (concat_dim_==0) {
     int offset_num = 0;    
-    for (i=0; i<bottom.size(); ++i) {      
-      Dtype* bottom_data = bottom[i]->cpu_data();
-      caffe_copy(bottom[i]->count(), bottom_data, top_data[(*top)[0]->offset(offset_num));
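+    // dim 0: copy each bottom blob wholesale at the running num offset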
+    for (int i=0; i<bottom.size(); ++i) {      
+      const Dtype* bottom_data = bottom[i]->cpu_data();
+      int num_elem = bottom[i]->count();
+      caffe_copy(num_elem, bottom_data, top_data+(*top)[0]->offset(offset_num));
       offset_num += bottom[i]->num();
     }
   } else if (concat_dim_ == 1) {
     int offset_channel = 0;    
-    for (i=0; i<bottom.size(); ++i) {
-      Dtype* bottom_data = bottom[i]->cpu_data();
+    for (int i=0; i<bottom.size(); ++i) {
+      const Dtype* bottom_data = bottom[i]->cpu_data();
       int num_elem = bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
-      for (n=0; n<NUM_; ++n){
-        caffe_copy(num_elem, bottom_data[bottom[i]->offset(n),
-          top_data[(*top)[0]->offset(n,offset_channel)]);  
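+      // per image, copy this blob's channels at the running channel offset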
+      for (int n=0; n<NUM_; ++n){
+        caffe_copy(num_elem, bottom_data+bottom[i]->offset(n),
+          top_data+(*top)[0]->offset(n,offset_channel));  
       }          
       offset_channel += bottom[i]->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim << " not implemented yet";
+    LOG(FATAL) << "Concat along dim " << concat_dim_ << " not implemented yet";
   }  
 }
 
@@ -72,24 +73,24 @@ void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   if (concat_dim_==0) {
     int offset_num = 0;    
-    for (i=0; i<bottom.size(); ++i) {      
-      Dtype* bottom_data = bottom[i]->gpu_data();
-      caffe_gpu_copy(bottom[i]->count(), bottom_data, top_data[(*top)[0]->offset(offset_num));
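+    // GPU mirror of Forward_cpu: device-side copies along num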
+    for (int i=0; i<bottom.size(); ++i) {      
+      const Dtype* bottom_data = bottom[i]->gpu_data();
+      caffe_gpu_copy(bottom[i]->count(), bottom_data, top_data+(*top)[0]->offset(offset_num));
       offset_num += bottom[i]->num();
     }
   } else if (concat_dim_ == 1) {
     int offset_channel = 0;    
-    for (i=0; i<bottom.size(); ++i) {
-      Dtype* bottom_data = bottom[i]->gpu_data();
+    for (int i=0; i<bottom.size(); ++i) {
+      const Dtype* bottom_data = bottom[i]->gpu_data();
       int num_elem = bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
-      for (n=0; n<NUM_; ++n){
-        caffe_gpu_copy(num_elem, bottom_data[bottom[i]->offset(n),
-          top_data[(*top)[0]->offset(n,offset_channel)]);  
+      for (int n=0; n<NUM_; ++n){
+        caffe_gpu_copy(num_elem, bottom_data+bottom[i]->offset(n),
+          top_data+(*top)[0]->offset(n,offset_channel));  
       }          
       offset_channel += bottom[i]->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim << " not implemented yet";
+    LOG(FATAL) << "Concat along dim " << concat_dim_ << " not implemented yet";
   }  
 }
 
@@ -99,26 +100,26 @@ Dtype ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->cpu_diff(); 
   if (concat_dim_==0) {
     int offset_num = 0;    
-    for (i=0; i < bottom->size(); ++i) {
+    for (int i=0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_cpu_diff();
-      caffe_copy(blob->count(), top_diff[top[0]->offset(offset_num)],bottom_diff);
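+      // route this blob's slice of the top gradient back into its diff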
+      caffe_copy(blob->count(), top_diff+top[0]->offset(offset_num),bottom_diff);
       offset_num += blob->num();
     }
   } else if (concat_dim_ == 1) {
     int offset_channel = 0;    
-    for (i=0; i < bottom->size(); ++i) {
+    for (int i=0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_cpu_diff();
       int num_elem = blob->channels()*blob->height()*blob->width();
-      for (n=0; n<NUM_; ++n){
-        caffe_copy(num_elem, top_diff[top[0]->offset(n,offset_channel)]
-          bottom_diff[blob->offset(n)]);  
+      for (int n=0; n<NUM_; ++n){
+        caffe_copy(num_elem, top_diff+top[0]->offset(n,offset_channel),
+          bottom_diff+blob->offset(n));
       }          
       offset_channel += blob->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim << " not implemented yet";
+    LOG(FATAL) << "Concat along dim " << concat_dim_ << " not implemented yet";
   } 
   return Dtype(0.);
 }
@@ -130,26 +131,26 @@ Dtype ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff(); 
   if (concat_dim_==0) {
     int offset_num = 0;    
-    for (i=0; i < bottom->size(); ++i) {
+    for (int i=0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_gpu_diff();
-      caffe_gpu_copy(blob->count(), top_diff[top[0]->offset(offset_num)],bottom_diff);
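+      // GPU mirror of Backward_cpu: device-side gradient copies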
+      caffe_gpu_copy(blob->count(), top_diff+top[0]->offset(offset_num),bottom_diff);
       offset_num += blob->num();
     }
   } else if (concat_dim_ == 1) {
     int offset_channel = 0;    
-    for (i=0; i < bottom->size(); ++i) {
+    for (int i=0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_gpu_diff();
       int num_elem = blob->channels()*blob->height()*blob->width();
-      for (n=0; n<NUM_; ++n){
-        caffe_gpu_copy(num_elem, top_diff[top[0]->offset(n,offset_channel)]
-          bottom_diff[blob->offset(n)]);  
+      for (int n=0; n<NUM_; ++n){
+        caffe_gpu_copy(num_elem, top_diff+top[0]->offset(n,offset_channel),
+          bottom_diff+blob->offset(n));
       }          
       offset_channel += blob->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim << " not implemented yet";
+    LOG(FATAL) << "Concat along dim " << concat_dim_ << " not implemented yet";
   } 
   return Dtype(0.);
 }
diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp
new file mode 100644 (file)
index 0000000..472c936
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright 2014 Sergio Guadarrama
+
+#include <cstring>
+#include <cuda_runtime.h>
+
+#include "gtest/gtest.h"
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+template <typename Dtype>
+class ConcatLayerTest : public ::testing::Test {
+ protected:
+  ConcatLayerTest()
+      : blob_bottom_0(new Blob<Dtype>(2, 3, 6, 5)),
+        blob_bottom_1(new Blob<Dtype>(2, 5, 6, 5)),
+        blob_bottom_2(new Blob<Dtype>(5, 3, 6, 5)),
+        blob_top_(new Blob<Dtype>()) {}
+  virtual void SetUp() {
+    // fill the values
+    FillerParameter filler_param;
+    filler_param.set_value(1.);
+    ConstantFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_0);
+    filler_param.set_value(2.);
+    filler.Fill(this->blob_bottom_1);
+    filler_param.set_value(3.);
+    filler.Fill(this->blob_bottom_2);
+    blob_bottom_vec_0.push_back(blob_bottom_0);
+    blob_bottom_vec_0.push_back(blob_bottom_1);
+    blob_bottom_vec_1.push_back(blob_bottom_0);
+    blob_bottom_vec_1.push_back(blob_bottom_2);
+    blob_top_vec_.push_back(blob_top_);
+  }
+
+  virtual ~ConcatLayerTest() {
+    delete blob_bottom_0;
+    delete blob_bottom_1;
+    delete blob_bottom_2;
+    delete blob_top_;
+  }
+
+  Blob<Dtype>* const blob_bottom_0;
+  Blob<Dtype>* const blob_bottom_1;
+  Blob<Dtype>* const blob_bottom_2;
+  Blob<Dtype>* const blob_top_;
+  vector<Blob<Dtype>*> blob_bottom_vec_0, blob_bottom_vec_1;
+  vector<Blob<Dtype>*> blob_top_vec_;
+};
+
+typedef ::testing::Types<float, double> Dtypes;
+TYPED_TEST_CASE(ConcatLayerTest, Dtypes);
+
+TYPED_TEST(ConcatLayerTest, TestSetupNum) {
+  LayerParameter layer_param;
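+  // concat along num (dim 0): bottoms must agree in channels, height, and width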
+  layer_param.set_concat_dim(0);
+  ConcatLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
+  EXPECT_EQ(this->blob_top_->num(),
+            this->blob_bottom_0->num() + this->blob_bottom_2->num());
+  EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels());
+  EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height());
+  EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width());
+}
+
+TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
+  LayerParameter layer_param;
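+  // concat_dim defaults to 1, so the bottoms are concatenated along channels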
+  ConcatLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
+  EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num());
+  EXPECT_EQ(this->blob_top_->channels(),
+            this->blob_bottom_0->channels() + this->blob_bottom_1->channels());
+  EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height());
+  EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width());
+}
+
+
+TYPED_TEST(ConcatLayerTest, TestCPUChannels) {
+  LayerParameter layer_param;
+  ConcatLayer<TypeParam> layer(layer_param);
+  Caffe::set_mode(Caffe::CPU);
+  layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_));
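+  // the top should hold bottom_0's channels followed by bottom_1's channels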
+  for (int n = 0; n < this->blob_top_->num(); ++n) {
+    for (int c = 0; c < this->blob_bottom_0->channels(); ++c) {
+      for (int h = 0; h < this->blob_top_->height(); ++h) {
+        for (int w = 0; w < this->blob_top_->width(); ++w) {
+          EXPECT_EQ(this->blob_top_->data_at(n, c, h, w),
+            this->blob_bottom_vec_0[0]->data_at(n, c, h, w));
+        }
+      }
+    }
+    for (int c = 0; c < this->blob_bottom_1->channels(); ++c) {
+      for (int h = 0; h < this->blob_top_->height(); ++h) {
+        for (int w = 0; w < this->blob_top_->width(); ++w) {
+          EXPECT_EQ(this->blob_top_->data_at(n, c + this->blob_bottom_0->channels(), h, w),
+            this->blob_bottom_vec_0[1]->data_at(n, c, h, w));
+        }
+      }
+    }
+  }
+}
+
+
+TYPED_TEST(ConcatLayerTest, TestCPUGradient) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::CPU);
+  ConcatLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  // it is too expensive to call curand multiple times, so we don't do an
+  // exhaustive gradient check.
+  checker.CheckGradient(layer, this->blob_bottom_vec_0, this->blob_top_vec_);
+}
+
+TYPED_TEST(ConcatLayerTest, TestGPUGradient) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::GPU);
+  ConcatLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  // it is too expensive to call curand multiple times, so we don't do an
+  // exhaustive gradient check.
+  checker.CheckGradient(layer, this->blob_bottom_vec_0, this->blob_top_vec_);
+}
+
+
+}  // namespace caffe