Move test headers to include/.
author: Jeff Donahue <jeff.donahue@gmail.com>
Sat, 12 Jul 2014 13:58:35 +0000 (06:58 -0700)
committer: Jeff Donahue <jeff.donahue@gmail.com>
Mon, 14 Jul 2014 09:17:58 +0000 (02:17 -0700)
Add a test param to test both CPU and GPU (with both float and double
Dtypes).

44 files changed:
Makefile
include/caffe/test/test_caffe_main.hpp [new file with mode: 0644]
include/caffe/test/test_gradient_check_util.hpp [moved from src/caffe/test/test_gradient_check_util.hpp with 100% similarity]
src/caffe/test/test_accuracy_layer.cpp
src/caffe/test/test_argmax_layer.cpp
src/caffe/test/test_benchmark.cpp
src/caffe/test/test_blob.cpp
src/caffe/test/test_caffe_main.cpp
src/caffe/test/test_caffe_main.hpp [deleted file]
src/caffe/test/test_common.cpp
src/caffe/test/test_concat_layer.cpp
src/caffe/test/test_convolution_layer.cpp
src/caffe/test/test_data_layer.cpp
src/caffe/test/test_dummy_data_layer.cpp
src/caffe/test/test_eltwise_layer.cpp
src/caffe/test/test_euclidean_loss_layer.cpp
src/caffe/test/test_filler.cpp
src/caffe/test/test_flatten_layer.cpp
src/caffe/test/test_hdf5_output_layer.cpp
src/caffe/test/test_hdf5data_layer.cpp
src/caffe/test/test_hinge_loss_layer.cpp
src/caffe/test/test_im2col_kernel.cu
src/caffe/test/test_im2col_layer.cpp
src/caffe/test/test_image_data_layer.cpp
src/caffe/test/test_inner_product_layer.cpp
src/caffe/test/test_lrn_layer.cpp
src/caffe/test/test_math_functions.cpp
src/caffe/test/test_maxpool_dropout_layers.cpp
src/caffe/test/test_memory_data_layer.cpp
src/caffe/test/test_multinomial_logistic_loss_layer.cpp
src/caffe/test/test_net.cpp
src/caffe/test/test_neuron_layer.cpp
src/caffe/test/test_pooling_layer.cpp
src/caffe/test/test_power_layer.cpp
src/caffe/test/test_random_number_generator.cpp
src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
src/caffe/test/test_softmax_layer.cpp
src/caffe/test/test_softmax_with_loss_layer.cpp
src/caffe/test/test_split_layer.cpp
src/caffe/test/test_stochastic_pooling.cpp
src/caffe/test/test_syncedmem.cpp
src/caffe/test/test_tanh_layer.cpp
src/caffe/test/test_threshold_layer.cpp
src/caffe/test/test_util_blas.cpp

index 829602b..c17d498 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -28,7 +28,7 @@ STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a
 # CXX_SRCS are the source files excluding the test ones.
 CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")
 # HXX_SRCS are the header files
-HXX_SRCS := $(shell find include/$(PROJECT) -name "*.hpp")
+HXX_SRCS := $(shell find include/$(PROJECT) ! -name "test_*.hpp" -name "*.hpp")
 # CU_SRCS are the cuda source files
 CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")
 # TEST_SRCS are the test source files
@@ -38,7 +38,7 @@ TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))
 TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")
 GTEST_SRC := src/gtest/gtest-all.cpp
 # TEST_HDRS are the test header files
-TEST_HDRS := $(shell find src/$(PROJECT) -name "test_*.hpp")
+TEST_HDRS := $(shell find include/$(PROJECT) -name "test_*.hpp")
 # TOOL_SRCS are the source files for the tool binaries
 TOOL_SRCS := $(shell find tools -name "*.cpp")
 # EXAMPLE_SRCS are the source files for the example binaries
@@ -282,7 +282,7 @@ SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo
 # Define build targets
 ##############################
 .PHONY: all test clean linecount lint tools examples $(DIST_ALIASES) \
-       py mat py$(PROJECT) mat$(PROJECT) proto runtest \
+       py mat py$(PROJECT) mat$(PROJECT) proto runtest runtestnogpu \
        superclean supercleanlist supercleanfiles warn
 
 all: $(NAME) $(STATIC_NAME) tools examples
@@ -343,6 +343,9 @@ $(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
 runtest: $(TEST_ALL_BIN)
        $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle
 
+runtestnogpu: $(TEST_ALL_BIN)
+       $(TEST_ALL_BIN) --gtest_shuffle --gtest_filter="-*GPU*:*/2.*:*/3.*"
+
 warn: $(EMPTY_WARN_REPORT)
 
 $(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)
diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp
new file mode 100644 (file)
index 0000000..e1a7645
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright 2014 BVLC and contributors.
+
+// The main caffe test code. Your test cpp code should include this hpp
+// to allow a main function to be compiled into the binary.
+#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
+#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
+
+#include <cuda_runtime.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+#include <cstdio>
+
+#include "caffe/common.hpp"
+
+using std::cout;
+using std::endl;
+
+int main(int argc, char** argv);
+
+namespace caffe {
+
+template <typename TypeParam>
+class MultiDeviceTest : public ::testing::Test {
+ public:
+  typedef typename TypeParam::Dtype Dtype;
+ protected:
+  MultiDeviceTest() {
+    Caffe::set_mode(TypeParam::device);
+  }
+  virtual ~MultiDeviceTest() {}
+};
+
+typedef ::testing::Types<float, double> TestDtypes;
+
+struct FloatCPU {
+  typedef float Dtype;
+  static const Caffe::Brew device = Caffe::CPU;
+};
+
+struct DoubleCPU {
+  typedef double Dtype;
+  static const Caffe::Brew device = Caffe::CPU;
+};
+
+struct FloatGPU {
+  typedef float Dtype;
+  static const Caffe::Brew device = Caffe::GPU;
+};
+
+struct DoubleGPU {
+  typedef double Dtype;
+  static const Caffe::Brew device = Caffe::GPU;
+};
+
+typedef ::testing::Types<FloatCPU, DoubleCPU, FloatGPU, DoubleGPU>
+    TestDtypesAndDevices;
+
+}  // namespace caffe
+
+#endif  // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
index deb72af..355a36b 100644 (file)
@@ -71,14 +71,12 @@ class AccuracyLayerTest : public ::testing::Test {
   int top_k_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(AccuracyLayerTest, Dtypes);
+TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes);
 
 TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
   LayerParameter layer_param;
   AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param();
   accuracy_param->set_top_k(this->top_k_);
-  Caffe::set_mode(Caffe::CPU);
   AccuracyLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
index c4150e5..44a13b9 100644 (file)
@@ -37,9 +37,7 @@ class ArgMaxLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ArgMaxLayerTest, Dtypes);
-
+TYPED_TEST_CASE(ArgMaxLayerTest, TestDtypes);
 
 TYPED_TEST(ArgMaxLayerTest, TestSetup) {
   LayerParameter layer_param;
index 40eee9c..8288008 100644 (file)
@@ -12,26 +12,19 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-class BenchmarkTest : public ::testing::Test {};
+template <typename TypeParam>
+class BenchmarkTest : public MultiDeviceTest<TypeParam> {};
 
-TEST_F(BenchmarkTest, TestTimerConstructorCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  Timer timer;
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_FALSE(timer.has_run_at_least_once());
-}
+TYPED_TEST_CASE(BenchmarkTest, TestDtypesAndDevices);
 
-TEST_F(BenchmarkTest, TestTimerConstructorGPU) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(BenchmarkTest, TestTimerConstructor) {
   Timer timer;
   EXPECT_TRUE(timer.initted());
   EXPECT_FALSE(timer.running());
   EXPECT_FALSE(timer.has_run_at_least_once());
 }
 
-TEST_F(BenchmarkTest, TestTimerStartCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerStart) {
   Timer timer;
   timer.Start();
   EXPECT_TRUE(timer.initted());
@@ -48,26 +41,7 @@ TEST_F(BenchmarkTest, TestTimerStartCPU) {
   EXPECT_TRUE(timer.has_run_at_least_once());
 }
 
-TEST_F(BenchmarkTest, TestTimerStartGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  Timer timer;
-  timer.Start();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_TRUE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-  timer.Stop();
-  timer.Start();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_TRUE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-  timer.Start();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_TRUE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerStopCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerStop) {
   Timer timer;
   timer.Stop();
   EXPECT_TRUE(timer.initted());
@@ -84,83 +58,31 @@ TEST_F(BenchmarkTest, TestTimerStopCPU) {
   EXPECT_TRUE(timer.has_run_at_least_once());
 }
 
-TEST_F(BenchmarkTest, TestTimerStopGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  Timer timer;
-  timer.Stop();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_FALSE(timer.has_run_at_least_once());
-  timer.Start();
-  timer.Stop();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-  timer.Stop();
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerMilliSecondsCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  Timer timer;
-  CHECK_EQ(timer.MilliSeconds(), 0);
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_FALSE(timer.has_run_at_least_once());
-  timer.Start();
-  usleep(300 * 1000);
-  CHECK_GE(timer.MilliSeconds(), 298);
-  CHECK_LE(timer.MilliSeconds(), 302);
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerMilliSecondsGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  Timer timer;
-  CHECK_EQ(timer.MilliSeconds(), 0);
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_FALSE(timer.has_run_at_least_once());
-  timer.Start();
-  usleep(300 * 1000);
-  CHECK_GE(timer.MilliSeconds(), 298);
-  CHECK_LE(timer.MilliSeconds(), 302);
-  EXPECT_TRUE(timer.initted());
-  EXPECT_FALSE(timer.running());
-  EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerSecondsCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) {
   Timer timer;
-  CHECK_EQ(timer.Seconds(), 0);
+  EXPECT_EQ(timer.MilliSeconds(), 0);
   EXPECT_TRUE(timer.initted());
   EXPECT_FALSE(timer.running());
   EXPECT_FALSE(timer.has_run_at_least_once());
   timer.Start();
   usleep(300 * 1000);
-  CHECK_GE(timer.Seconds(), 0.298);
-  CHECK_LE(timer.Seconds(), 0.302);
+  EXPECT_GE(timer.MilliSeconds(), 298);
+  EXPECT_LE(timer.MilliSeconds(), 302);
   EXPECT_TRUE(timer.initted());
   EXPECT_FALSE(timer.running());
   EXPECT_TRUE(timer.has_run_at_least_once());
 }
 
-TEST_F(BenchmarkTest, TestTimerSecondsGPU) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(BenchmarkTest, TestTimerSeconds) {
   Timer timer;
-  CHECK_EQ(timer.Seconds(), 0);
+  EXPECT_EQ(timer.Seconds(), 0);
   EXPECT_TRUE(timer.initted());
   EXPECT_FALSE(timer.running());
   EXPECT_FALSE(timer.has_run_at_least_once());
   timer.Start();
   usleep(300 * 1000);
-  CHECK_GE(timer.Seconds(), 0.298);
-  CHECK_LE(timer.Seconds(), 0.302);
+  EXPECT_GE(timer.Seconds(), 0.298);
+  EXPECT_LE(timer.Seconds(), 0.302);
   EXPECT_TRUE(timer.initted());
   EXPECT_FALSE(timer.running());
   EXPECT_TRUE(timer.has_run_at_least_once());
index 5d38e54..a524094 100644 (file)
@@ -23,8 +23,7 @@ class BlobSimpleTest : public ::testing::Test {
   Blob<Dtype>* const blob_preshaped_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(BlobSimpleTest, Dtypes);
+TYPED_TEST_CASE(BlobSimpleTest, TestDtypes);
 
 TYPED_TEST(BlobSimpleTest, TestInitialization) {
   EXPECT_TRUE(this->blob_);
@@ -41,7 +40,7 @@ TYPED_TEST(BlobSimpleTest, TestInitialization) {
   EXPECT_EQ(this->blob_->count(), 0);
 }
 
-TYPED_TEST(BlobSimpleTest, TestPointers) {
+TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
   EXPECT_TRUE(this->blob_preshaped_->gpu_data());
   EXPECT_TRUE(this->blob_preshaped_->cpu_data());
   EXPECT_TRUE(this->blob_preshaped_->mutable_gpu_data());
index ecc117e..07e6b8d 100644 (file)
@@ -3,7 +3,7 @@
 // The main caffe test code. Your test cpp code should include this hpp
 // to allow a main function to be compiled into the binary.
 
-#include "test_caffe_main.hpp"
+#include "caffe/test/test_caffe_main.hpp"
 
 namespace caffe {
   cudaDeviceProp CAFFE_TEST_CUDA_PROP;
diff --git a/src/caffe/test/test_caffe_main.hpp b/src/caffe/test/test_caffe_main.hpp
deleted file mode 100644 (file)
index df64cbb..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 BVLC and contributors.
-
-// The main caffe test code. Your test cpp code should include this hpp
-// to allow a main function to be compiled into the binary.
-#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
-#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
-
-#include <cuda_runtime.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include <cstdlib>
-#include <cstdio>
-
-using std::cout;
-using std::endl;
-
-int main(int argc, char** argv);
-
-#endif  // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
index 13c2d95..a452b61 100644 (file)
@@ -13,7 +13,7 @@ namespace caffe {
 
 class CommonTest : public ::testing::Test {};
 
-TEST_F(CommonTest, TestCublasHandler) {
+TEST_F(CommonTest, TestCublasHandlerGPU) {
   int cuda_device_id;
   CUDA_CHECK(cudaGetDevice(&cuda_device_id));
   EXPECT_TRUE(Caffe::cublas_handle());
@@ -53,10 +53,10 @@ TEST_F(CommonTest, TestRandSeedGPU) {
   SyncedMemory data_b(10 * sizeof(unsigned int));
   Caffe::set_random_seed(1701);
   CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
-        reinterpret_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
+        static_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
   Caffe::set_random_seed(1701);
   CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
-        reinterpret_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
+        static_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
   for (int i = 0; i < 10; ++i) {
     EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
         ((const unsigned int*)(data_b.cpu_data()))[i]);
index 72e3c90..ff208a9 100644 (file)
@@ -17,8 +17,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class ConcatLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ConcatLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   ConcatLayerTest()
       : blob_bottom_0(new Blob<Dtype>(2, 3, 6, 5)),
@@ -55,13 +57,13 @@ class ConcatLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ConcatLayerTest, Dtypes);
+TYPED_TEST_CASE(ConcatLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(ConcatLayerTest, TestSetupNum) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   layer_param.mutable_concat_param()->set_concat_dim(0);
-  ConcatLayer<TypeParam> layer(layer_param);
+  ConcatLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(),
     this->blob_bottom_0->num() + this->blob_bottom_2->num());
@@ -71,8 +73,9 @@ TYPED_TEST(ConcatLayerTest, TestSetupNum) {
 }
 
 TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  ConcatLayer<TypeParam> layer(layer_param);
+  ConcatLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num());
   EXPECT_EQ(this->blob_top_->channels(),
@@ -82,10 +85,10 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
 }
 
 
-TYPED_TEST(ConcatLayerTest, TestCPUNum) {
+TYPED_TEST(ConcatLayerTest, TestNum) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  ConcatLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
+  ConcatLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_));
   for (int n = 0; n < this->blob_top_->num(); ++n) {
@@ -108,21 +111,11 @@ TYPED_TEST(ConcatLayerTest, TestCPUNum) {
   }
 }
 
-
-TYPED_TEST(ConcatLayerTest, TestCPUGradient) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  ConcatLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
-    &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConcatLayerTest, TestGPUGradient) {
+TYPED_TEST(ConcatLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  ConcatLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  ConcatLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
     &(this->blob_top_vec_));
 }
index f740101..6f3e314 100644 (file)
@@ -17,8 +17,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class ConvolutionLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ConvolutionLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   ConvolutionLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 6, 4)),
@@ -51,10 +53,10 @@ class ConvolutionLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ConvolutionLayerTest, Dtypes);
+TYPED_TEST_CASE(ConvolutionLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(ConvolutionLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
@@ -63,8 +65,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   convolution_param->set_num_output(4);
   this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
   this->blob_top_vec_.push_back(this->blob_top_2_);
-  shared_ptr<Layer<TypeParam> > layer(
-      new ConvolutionLayer<TypeParam>(layer_param));
+  shared_ptr<Layer<Dtype> > layer(
+      new ConvolutionLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 4);
@@ -77,7 +79,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   // setting group should not change the shape
   convolution_param->set_num_output(3);
   convolution_param->set_group(3);
-  layer.reset(new ConvolutionLayer<TypeParam>(layer_param));
+  layer.reset(new ConvolutionLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 3);
@@ -89,15 +91,16 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_2_->width(), 1);
 }
 
-TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) {
+TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
   // We will simply see if the convolution layer carries out averaging well.
-  shared_ptr<ConstantFiller<TypeParam> > filler;
+  typedef typename TypeParam::Dtype Dtype;
+  shared_ptr<ConstantFiller<Dtype> > filler;
   FillerParameter filler_param;
   filler_param.set_value(1.);
-  filler.reset(new ConstantFiller<TypeParam>(filler_param));
+  filler.reset(new ConstantFiller<Dtype>(filler_param));
   filler->Fill(this->blob_bottom_);
   filler_param.set_value(2.);
-  filler.reset(new ConstantFiller<TypeParam>(filler_param));
+  filler.reset(new ConstantFiller<Dtype>(filler_param));
   filler->Fill(this->blob_bottom_2_);
   this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
   this->blob_top_vec_.push_back(this->blob_top_2_);
@@ -111,13 +114,12 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) {
   convolution_param->mutable_weight_filler()->set_value(1);
   convolution_param->mutable_bias_filler()->set_type("constant");
   convolution_param->mutable_bias_filler()->set_value(0.1);
-  shared_ptr<Layer<TypeParam> > layer(
-      new ConvolutionLayer<TypeParam>(layer_param));
+  shared_ptr<Layer<Dtype> > layer(
+      new ConvolutionLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Caffe::set_mode(Caffe::CPU);
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // After the convolution, the output should all have output values 27.1
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
     EXPECT_NEAR(top_data[i], 27.1, 1e-4);
   }
@@ -127,98 +129,14 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) {
   }
 }
 
-TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolution) {
-  // We will simply see if the convolution layer carries out averaging well.
-  shared_ptr<ConstantFiller<TypeParam> > filler;
-  FillerParameter filler_param;
-  filler_param.set_value(1.);
-  filler.reset(new ConstantFiller<TypeParam>(filler_param));
-  filler->Fill(this->blob_bottom_);
-  filler_param.set_value(2.);
-  filler.reset(new ConstantFiller<TypeParam>(filler_param));
-  filler->Fill(this->blob_bottom_2_);
-  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
-  this->blob_top_vec_.push_back(this->blob_top_2_);
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  convolution_param->set_num_output(4);
-  convolution_param->mutable_weight_filler()->set_type("constant");
-  convolution_param->mutable_weight_filler()->set_value(1);
-  convolution_param->mutable_bias_filler()->set_type("constant");
-  convolution_param->mutable_bias_filler()->set_value(0.1);
-  shared_ptr<Layer<TypeParam> > layer(
-      new ConvolutionLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Caffe::set_mode(Caffe::GPU);
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // After the convolution, the output should all have output values 27.1
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    EXPECT_NEAR(top_data[i], 27.1, 1e-4);
-  }
-  top_data = this->blob_top_2_->cpu_data();
-  for (int i = 0; i < this->blob_top_2_->count(); ++i) {
-    EXPECT_NEAR(top_data[i], 54.1, 1e-4);
-  }
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolutionGroup) {
-  // We will simply see if the convolution layer carries out averaging well.
-  FillerParameter filler_param;
-  filler_param.set_value(1.);
-  ConstantFiller<TypeParam> filler(filler_param);
-  filler.Fill(this->blob_bottom_);
-  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
-  for (int n = 0; n < this->blob_bottom_->num(); ++n) {
-    for (int c = 0; c < this->blob_bottom_->channels(); ++c) {
-      for (int h = 0; h < this->blob_bottom_->height(); ++h) {
-        for (int w = 0; w < this->blob_bottom_->width(); ++w) {
-          bottom_data[this->blob_bottom_->offset(n, c, h, w)] = c;
-        }
-      }
-    }
-  }
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  convolution_param->set_num_output(3);
-  convolution_param->set_group(3);
-  convolution_param->mutable_weight_filler()->set_type("constant");
-  convolution_param->mutable_weight_filler()->set_value(1);
-  convolution_param->mutable_bias_filler()->set_type("constant");
-  convolution_param->mutable_bias_filler()->set_value(0.1);
-  shared_ptr<Layer<TypeParam> > layer(
-      new ConvolutionLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Caffe::set_mode(Caffe::CPU);
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // After the convolution, the output should all have output values 9.1
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int n = 0; n < this->blob_top_->num(); ++n) {
-    for (int c = 0; c < this->blob_top_->channels(); ++c) {
-      for (int h = 0; h < this->blob_top_->height(); ++h) {
-        for (int w = 0; w < this->blob_top_->width(); ++w) {
-          TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
-          EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
-        }
-      }
-    }
-  }
-}
-
-
-TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) {
+TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
   // We will simply see if the convolution layer carries out averaging well.
+  typedef typename TypeParam::Dtype Dtype;
   FillerParameter filler_param;
   filler_param.set_value(1.);
-  ConstantFiller<TypeParam> filler(filler_param);
+  ConstantFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
-  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data();
   for (int n = 0; n < this->blob_bottom_->num(); ++n) {
     for (int c = 0; c < this->blob_bottom_->channels(); ++c) {
       for (int h = 0; h < this->blob_bottom_->height(); ++h) {
@@ -239,18 +157,17 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) {
   convolution_param->mutable_weight_filler()->set_value(1);
   convolution_param->mutable_bias_filler()->set_type("constant");
   convolution_param->mutable_bias_filler()->set_value(0.1);
-  shared_ptr<Layer<TypeParam> > layer(
-      new ConvolutionLayer<TypeParam>(layer_param));
+  shared_ptr<Layer<Dtype> > layer(
+      new ConvolutionLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Caffe::set_mode(Caffe::GPU);
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // After the convolution, the output should all have output values 9.1
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int n = 0; n < this->blob_top_->num(); ++n) {
     for (int c = 0; c < this->blob_top_->channels(); ++c) {
       for (int h = 0; h < this->blob_top_->height(); ++h) {
         for (int w = 0; w < this->blob_top_->width(); ++w) {
-          TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
+          Dtype data = top_data[this->blob_top_->offset(n, c, h, w)];
           EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
         }
       }
@@ -258,43 +175,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) {
   }
 }
 
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
-  this->blob_top_vec_.push_back(this->blob_top_2_);
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  convolution_param->set_num_output(2);
-  convolution_param->mutable_weight_filler()->set_type("gaussian");
-  convolution_param->mutable_bias_filler()->set_type("gaussian");
-  Caffe::set_mode(Caffe::CPU);
-  ConvolutionLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  convolution_param->set_num_output(3);
-  convolution_param->set_group(3);
-  convolution_param->mutable_weight_filler()->set_type("gaussian");
-  convolution_param->mutable_bias_filler()->set_type("gaussian");
-  Caffe::set_mode(Caffe::CPU);
-  ConvolutionLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
+TYPED_TEST(ConvolutionLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
@@ -305,14 +187,14 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
   convolution_param->set_num_output(2);
   convolution_param->mutable_weight_filler()->set_type("gaussian");
   convolution_param->mutable_bias_filler()->set_type("gaussian");
-  Caffe::set_mode(Caffe::GPU);
-  ConvolutionLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  ConvolutionLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
+TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
@@ -322,9 +204,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
   convolution_param->set_group(3);
   convolution_param->mutable_weight_filler()->set_type("gaussian");
   convolution_param->mutable_bias_filler()->set_type("gaussian");
-  Caffe::set_mode(Caffe::GPU);
-  ConvolutionLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  ConvolutionLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index 8ba5f29..8cd157d 100644 (file)
@@ -20,8 +20,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class DataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class DataLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   DataLayerTest()
       : backend_(DataParameter_DB_LEVELDB),
@@ -309,54 +311,25 @@ class DataLayerTest : public ::testing::Test {
   int seed_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(DataLayerTest, Dtypes);
+TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(DataLayerTest, TestReadLevelDBCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(DataLayerTest, TestReadLevelDB) {
   const bool unique_pixels = false;  // all pixels the same; images different
   this->FillLevelDB(unique_pixels);
   this->TestRead();
 }
 
-TYPED_TEST(DataLayerTest, TestReadLevelDBGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  const bool unique_pixels = false;  // all pixels the same; images different
-  this->FillLevelDB(unique_pixels);
-  this->TestRead();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLevelDB(unique_pixels);
   this->TestReadCrop();
 }
 
-TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBGPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLevelDB(unique_pixels);
-  this->TestReadCrop();
-}
-
-// Test that the sequence of random crops is consistent when using
-// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBCPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLevelDB(unique_pixels);
-  this->TestReadCropTrainSequenceSeeded();
-}
-
 // Test that the sequence of random crops is consistent when using
 // Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLevelDB(unique_pixels);
   this->TestReadCropTrainSequenceSeeded();
@@ -364,65 +337,28 @@ TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBGPU) {
 
 // Test that the sequence of random crops differs across iterations when
 // Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLevelDB(unique_pixels);
   this->TestReadCropTrainSequenceUnseeded();
 }
 
-// Test that the sequence of random crops differs across iterations when
-// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBGPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLevelDB(unique_pixels);
-  this->TestReadCropTrainSequenceUnseeded();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBCPU) {
-  Caffe::set_phase(Caffe::TEST);
-  Caffe::set_mode(Caffe::CPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLevelDB(unique_pixels);
-  this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) {
   Caffe::set_phase(Caffe::TEST);
-  Caffe::set_mode(Caffe::GPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLevelDB(unique_pixels);
   this->TestReadCrop();
 }
 
-TYPED_TEST(DataLayerTest, TestReadLMDBCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(DataLayerTest, TestReadLMDB) {
   const bool unique_pixels = false;  // all pixels the same; images different
   this->FillLMDB(unique_pixels);
   this->TestRead();
 }
 
-TYPED_TEST(DataLayerTest, TestReadLMDBGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  const bool unique_pixels = false;  // all pixels the same; images different
-  this->FillLMDB(unique_pixels);
-  this->TestRead();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLMDB(unique_pixels);
-  this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBGPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLMDB(unique_pixels);
   this->TestReadCrop();
@@ -430,55 +366,24 @@ TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBGPU) {
 
 // Test that the sequence of random crops is consistent when using
 // Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLMDB(unique_pixels);
   this->TestReadCropTrainSequenceSeeded();
 }
 
-// Test that the sequence of random crops is consistent when using
-// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBGPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLMDB(unique_pixels);
-  this->TestReadCropTrainSequenceSeeded();
-}
-
-// Test that the sequence of random crops differs across iterations when
-// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBCPU) {
-  Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::CPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLMDB(unique_pixels);
-  this->TestReadCropTrainSequenceUnseeded();
-}
-
 // Test that the sequence of random crops differs across iterations when
 // Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) {
   Caffe::set_phase(Caffe::TRAIN);
-  Caffe::set_mode(Caffe::GPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLMDB(unique_pixels);
   this->TestReadCropTrainSequenceUnseeded();
 }
 
-TYPED_TEST(DataLayerTest, TestReadCropTestLMDBCPU) {
-  Caffe::set_phase(Caffe::TEST);
-  Caffe::set_mode(Caffe::CPU);
-  const bool unique_pixels = true;  // all images the same; pixels different
-  this->FillLMDB(unique_pixels);
-  this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLMDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) {
   Caffe::set_phase(Caffe::TEST);
-  Caffe::set_mode(Caffe::GPU);
   const bool unique_pixels = true;  // all images the same; pixels different
   this->FillLMDB(unique_pixels);
   this->TestReadCrop();
index 7d9287e..3a83a79 100644 (file)
@@ -46,8 +46,7 @@ class DummyDataLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(DummyDataLayerTest, Dtypes);
+TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes);
 
 TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
   Caffe::set_mode(Caffe::CPU);
index 5f72f62..66490d2 100644 (file)
@@ -16,8 +16,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class EltwiseLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class EltwiseLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   EltwiseLayerTest()
       : blob_bottom_a_(new Blob<Dtype>(2, 3, 4, 5)),
@@ -49,15 +51,15 @@ class EltwiseLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(EltwiseLayerTest, Dtypes);
+TYPED_TEST_CASE(EltwiseLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(EltwiseLayerTest, TestSetUp) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
+  shared_ptr<EltwiseLayer<Dtype> > layer(
+      new EltwiseLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 3);
@@ -65,185 +67,99 @@ TYPED_TEST(EltwiseLayerTest, TestSetUp) {
   EXPECT_EQ(this->blob_top_->width(), 5);
 }
 
-TYPED_TEST(EltwiseLayerTest, TestProdCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestProd) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
+  shared_ptr<EltwiseLayer<Dtype> > layer(
+      new EltwiseLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
+  const Dtype* data = this->blob_top_->cpu_data();
   const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+  const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+  const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+  const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
   for (int i = 0; i < count; ++i) {
     EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]);
   }
 }
 
-TYPED_TEST(EltwiseLayerTest, TestSumCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestSum) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
+  shared_ptr<EltwiseLayer<Dtype> > layer(
+      new EltwiseLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
+  const Dtype* data = this->blob_top_->cpu_data();
   const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+  const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+  const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+  const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
   for (int i = 0; i < count; ++i) {
     EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]);
   }
 }
 
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestSumCoeff) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
   eltwise_param->add_coeff(1);
   eltwise_param->add_coeff(-0.5);
   eltwise_param->add_coeff(2);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
+  shared_ptr<EltwiseLayer<Dtype> > layer(
+      new EltwiseLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
+  const Dtype* data = this->blob_top_->cpu_data();
   const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+  const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+  const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+  const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
   for (int i = 0; i < count; ++i) {
     EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i],
         1e-4);
   }
 }
 
-TYPED_TEST(EltwiseLayerTest, TestProdGPU) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestProdGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
-  const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]);
-  }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  LayerParameter layer_param;
-  EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
-  eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
-  const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]);
-  }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  LayerParameter layer_param;
-  EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
-  eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  eltwise_param->add_coeff(1);
-  eltwise_param->add_coeff(-0.5);
-  eltwise_param->add_coeff(2);
-  shared_ptr<EltwiseLayer<TypeParam> > layer(
-      new EltwiseLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
-  const int count = this->blob_top_->count();
-  const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
-  const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
-  const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i],
-        1e-4);
-  }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestProdCPUGradient) {
-  Caffe::set_mode(Caffe::CPU);
-  LayerParameter layer_param;
-  EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
-  eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
-  EltwiseLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCPUGradient) {
-  Caffe::set_mode(Caffe::CPU);
-  LayerParameter layer_param;
-  EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
-  eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  EltwiseLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPUGradient) {
-  Caffe::set_mode(Caffe::CPU);
-  LayerParameter layer_param;
-  EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
-  eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  eltwise_param->add_coeff(1);
-  eltwise_param->add_coeff(-0.5);
-  eltwise_param->add_coeff(2);
-  EltwiseLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  EltwiseLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(EltwiseLayerTest, TestSumGPUGradient) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
-  EltwiseLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  EltwiseLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPUGradient) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
   eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
   eltwise_param->add_coeff(1);
   eltwise_param->add_coeff(-0.5);
   eltwise_param->add_coeff(2);
-  EltwiseLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  EltwiseLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index 02945b6..8c79694 100644 (file)
@@ -19,8 +19,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class EuclideanLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class EuclideanLossLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   EuclideanLossLayerTest()
       : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
@@ -43,27 +45,17 @@ class EuclideanLossLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(EuclideanLossLayerTest, Dtypes);
+TYPED_TEST_CASE(EuclideanLossLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EuclideanLossLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  EuclideanLossLayer<TypeParam> layer(layer_param);
+  EuclideanLossLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
   checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_), -1, -1, -1);
 }
 
-TYPED_TEST(EuclideanLossLayerTest, TestGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  LayerParameter layer_param;
-  EuclideanLossLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
-  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), -1, -1, -1);
-}
 
 }  // namespace caffe
index e8b556a..93eda7e 100644 (file)
@@ -10,8 +10,6 @@
 
 namespace caffe {
 
-typedef ::testing::Types<float, double> Dtypes;
-
 template <typename Dtype>
 class ConstantFillerTest : public ::testing::Test {
  protected:
@@ -28,7 +26,7 @@ class ConstantFillerTest : public ::testing::Test {
   shared_ptr<ConstantFiller<Dtype> > filler_;
 };
 
-TYPED_TEST_CASE(ConstantFillerTest, Dtypes);
+TYPED_TEST_CASE(ConstantFillerTest, TestDtypes);
 
 TYPED_TEST(ConstantFillerTest, TestFill) {
   EXPECT_TRUE(this->blob_);
@@ -57,7 +55,7 @@ class UniformFillerTest : public ::testing::Test {
   shared_ptr<UniformFiller<Dtype> > filler_;
 };
 
-TYPED_TEST_CASE(UniformFillerTest, Dtypes);
+TYPED_TEST_CASE(UniformFillerTest, TestDtypes);
 
 TYPED_TEST(UniformFillerTest, TestFill) {
   EXPECT_TRUE(this->blob_);
@@ -84,7 +82,7 @@ class PositiveUnitballFillerTest : public ::testing::Test {
   shared_ptr<PositiveUnitballFiller<Dtype> > filler_;
 };
 
-TYPED_TEST_CASE(PositiveUnitballFillerTest, Dtypes);
+TYPED_TEST_CASE(PositiveUnitballFillerTest, TestDtypes);
 
 TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
   EXPECT_TRUE(this->blob_);
@@ -123,7 +121,7 @@ class GaussianFillerTest : public ::testing::Test {
   shared_ptr<GaussianFiller<Dtype> > filler_;
 };
 
-TYPED_TEST_CASE(GaussianFillerTest, Dtypes);
+TYPED_TEST_CASE(GaussianFillerTest, TestDtypes);
 
 TYPED_TEST(GaussianFillerTest, TestFill) {
   EXPECT_TRUE(this->blob_);
index 52c567b..e6e777e 100644 (file)
@@ -17,8 +17,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class FlattenLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class FlattenLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   FlattenLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
@@ -38,12 +39,12 @@ class FlattenLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(FlattenLayerTest, Dtypes);
+TYPED_TEST_CASE(FlattenLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(FlattenLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  FlattenLayer<TypeParam> layer(layer_param);
+  FlattenLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5);
@@ -51,10 +52,10 @@ TYPED_TEST(FlattenLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_->width(), 1);
 }
 
-TYPED_TEST(FlattenLayerTest, TestCPU) {
+TYPED_TEST(FlattenLayerTest, Test) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  FlattenLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
+  FlattenLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int c = 0; c < 3 * 6 * 5; ++c) {
@@ -65,34 +66,11 @@ TYPED_TEST(FlattenLayerTest, TestCPU) {
   }
 }
 
-TYPED_TEST(FlattenLayerTest, TestGPU) {
+TYPED_TEST(FlattenLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  FlattenLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int c = 0; c < 3 * 6 * 5; ++c) {
-    EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
-        this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
-    EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0),
-        this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5));
-  }
-}
-
-TYPED_TEST(FlattenLayerTest, TestCPUGradient) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  FlattenLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  FlattenLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  FlattenLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index 5b396ef..6fd9a2f 100644 (file)
@@ -19,8 +19,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template<typename Dtype>
-class HDF5OutputLayerTest : public ::testing::Test {
+template<typename TypeParam>
+class HDF5OutputLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   HDF5OutputLayerTest()
       : output_file_name_(tmpnam(NULL)),
@@ -52,9 +54,9 @@ class HDF5OutputLayerTest : public ::testing::Test {
   int width_;
 };
 
-template<typename Dtype>
-void HDF5OutputLayerTest<Dtype>::CheckBlobEqual(const Blob<Dtype>& b1,
-                                                const Blob<Dtype>& b2) {
+template<typename TypeParam>
+void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
+                                                    const Blob<Dtype>& b2) {
   EXPECT_EQ(b1.num(), b2.num());
   EXPECT_EQ(b1.channels(), b2.channels());
   EXPECT_EQ(b1.height(), b2.height());
@@ -70,14 +72,10 @@ void HDF5OutputLayerTest<Dtype>::CheckBlobEqual(const Blob<Dtype>& b1,
   }
 }
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HDF5OutputLayerTest, Dtypes);
+TYPED_TEST_CASE(HDF5OutputLayerTest, TestDtypesAndDevices);
 
-/*
- * TestCPUForward and TestGPUForward are almost identical except for the mode.
- * They are separated to use with `test_all.testbin --gtest_filter="*CPU*"`.
- */
-TYPED_TEST(HDF5OutputLayerTest, TestCPUForward) {
+TYPED_TEST(HDF5OutputLayerTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
   LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
   hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
                           H5P_DEFAULT);
@@ -93,62 +91,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestCPUForward) {
   this->blob_bottom_vec_.push_back(this->blob_data_);
   this->blob_bottom_vec_.push_back(this->blob_label_);
 
-  Caffe::set_mode(Caffe::CPU);
-  LayerParameter param;
-  param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
-  // This code block ensures that the layer is deconstructed and
-  //   the output hdf5 file is closed.
-  {
-    HDF5OutputLayer<TypeParam> layer(param);
-    EXPECT_EQ(layer.file_name(), this->output_file_name_);
-    layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-    layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
-  }
-  file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
-                          H5P_DEFAULT);
-  ASSERT_GE(
-    file_id, 0)<< "Failed to open HDF5 file" <<
-          this->input_file_name_;
-
-  Blob<TypeParam>* blob_data = new Blob<TypeParam>();
-  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
-                       blob_data);
-  this->CheckBlobEqual(*(this->blob_data_), *blob_data);
-
-  Blob<TypeParam>* blob_label = new Blob<TypeParam>();
-  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
-                       blob_label);
-  this->CheckBlobEqual(*(this->blob_label_), *blob_label);
-
-  status = H5Fclose(file_id);
-  EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
-      this->output_file_name_;
-}
-
-TYPED_TEST(HDF5OutputLayerTest, TestGPUForward) {
-  LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
-
-  hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
-                          H5P_DEFAULT);
-  ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" <<
-      this->input_file_name_;
-  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
-                       this->blob_data_);
-  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
-                       this->blob_label_);
-  herr_t status = H5Fclose(file_id);
-  EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
-      this->input_file_name_;
-  this->blob_bottom_vec_.push_back(this->blob_data_);
-  this->blob_bottom_vec_.push_back(this->blob_label_);
-
-  Caffe::set_mode(Caffe::GPU);
   LayerParameter param;
   param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
   // This code block ensures that the layer is deconstructed and
   //   the output hdf5 file is closed.
   {
-    HDF5OutputLayer<TypeParam> layer(param);
+    HDF5OutputLayer<Dtype> layer(param);
     EXPECT_EQ(layer.file_name(), this->output_file_name_);
     layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
     layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
@@ -159,12 +107,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestGPUForward) {
     file_id, 0)<< "Failed to open HDF5 file" <<
           this->input_file_name_;
 
-  Blob<TypeParam>* blob_data = new Blob<TypeParam>();
+  Blob<Dtype>* blob_data = new Blob<Dtype>();
   hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
                        blob_data);
   this->CheckBlobEqual(*(this->blob_data_), *blob_data);
 
-  Blob<TypeParam>* blob_label = new Blob<TypeParam>();
+  Blob<Dtype>* blob_label = new Blob<Dtype>();
   hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
                        blob_label);
   this->CheckBlobEqual(*(this->blob_label_), *blob_label);
index 1cbca00..3eb2421 100644 (file)
@@ -20,8 +20,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class HDF5DataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   HDF5DataLayerTest()
       : filename(NULL),
@@ -49,10 +51,10 @@ class HDF5DataLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HDF5DataLayerTest, Dtypes);
+TYPED_TEST_CASE(HDF5DataLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(HDF5DataLayerTest, TestRead) {
+  typedef typename TypeParam::Dtype Dtype;
   // Create LayerParameter with the known parameters.
   // The data file we are reading has 10 rows and 8 columns,
   // with values from 0 to 10*8 reshaped in row-major order.
@@ -66,7 +68,7 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
   int width = 5;
 
   // Test that the layer setup got the correct parameters.
-  HDF5DataLayer<TypeParam> layer(param);
+  HDF5DataLayer<Dtype> layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_data_->num(), batch_size);
   EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
@@ -78,49 +80,41 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
   EXPECT_EQ(this->blob_top_label_->height(), 1);
   EXPECT_EQ(this->blob_top_label_->width(), 1);
 
-  for (int t = 0; t < 2; ++t) {
-    // TODO: make this a TypedTest instead of this silly loop.
-    if (t == 0) {
-      Caffe::set_mode(Caffe::CPU);
-    } else {
-      Caffe::set_mode(Caffe::GPU);
+  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+
+  // Go through the data 10 times (5 batches).
+  const int data_size = num_cols * height * width;
+  for (int iter = 0; iter < 10; ++iter) {
+    layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+
+    // On even iterations, we're reading the first half of the data.
+    // On odd iterations, we're reading the second half of the data.
+    int label_offset = (iter % 2 == 0) ? 0 : batch_size;
+    int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
+
+    // Every two iterations we are reading the second file,
+    // which has the same labels, but data is offset by total data size,
+    // which is 2000 (see generate_sample_data).
+    int file_offset = (iter % 4 < 2) ? 0 : 2000;
+
+    for (int i = 0; i < batch_size; ++i) {
+      EXPECT_EQ(
+        label_offset + i,
+        this->blob_top_label_->cpu_data()[i]);
     }
-    layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-
-    // Go through the data 10 times (5 batches).
-    const int data_size = num_cols * height * width;
-    for (int iter = 0; iter < 10; ++iter) {
-      layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
-
-      // On even iterations, we're reading the first half of the data.
-      // On odd iterations, we're reading the second half of the data.
-      int label_offset = (iter % 2 == 0) ? 0 : batch_size;
-      int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
-
-      // Every two iterations we are reading the second file,
-      // which has the same labels, but data is offset by total data size,
-      // which is 2000 (see generate_sample_data).
-      int file_offset = (iter % 4 < 2) ? 0 : 2000;
-
-      for (int i = 0; i < batch_size; ++i) {
-        EXPECT_EQ(
-          label_offset + i,
-          this->blob_top_label_->cpu_data()[i]);
-      }
-      for (int i = 0; i < batch_size; ++i) {
-        for (int j = 0; j < num_cols; ++j) {
-          for (int h = 0; h < height; ++h) {
-            for (int w = 0; w < width; ++w) {
-              int idx = (
-                i * num_cols * height * width +
-                j * height * width +
-                h * width + w);
-              EXPECT_EQ(
-                file_offset + data_offset + idx,
-                this->blob_top_data_->cpu_data()[idx])
-                << "debug: i " << i << " j " << j
-                << " iter " << iter << " t " << t;
-            }
+    for (int i = 0; i < batch_size; ++i) {
+      for (int j = 0; j < num_cols; ++j) {
+        for (int h = 0; h < height; ++h) {
+          for (int w = 0; w < width; ++w) {
+            int idx = (
+              i * num_cols * height * width +
+              j * height * width +
+              h * width + w);
+            EXPECT_EQ(
+              file_offset + data_offset + idx,
+              this->blob_top_data_->cpu_data()[idx])
+              << "debug: i " << i << " j " << j
+              << " iter " << iter;
           }
         }
       }
index 318030b..868e9e9 100644 (file)
@@ -19,8 +19,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class HingeLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class HingeLossLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   HingeLossLayerTest()
       : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
@@ -46,56 +48,31 @@ class HingeLossLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HingeLossLayerTest, Dtypes);
+TYPED_TEST_CASE(HingeLossLayerTest, TestDtypesAndDevices);
 
 
-TYPED_TEST(HingeLossLayerTest, TestGradientL1CPU) {
+TYPED_TEST(HingeLossLayerTest, TestGradientL1) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  HingeLossLayer<TypeParam> layer(layer_param);
+  HingeLossLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 1, 0.01);
+  GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 1, 0.01);
   checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_), 0, -1, -1);
 }
 
-TYPED_TEST(HingeLossLayerTest, TestGradientL1GPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  HingeLossLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 1, 0.01);
-  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), 0, -1, -1);
-}
-
-
-TYPED_TEST(HingeLossLayerTest, TestGradientL2CPU) {
+TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   // Set norm to L2
   HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param();
   hinge_loss_param->set_norm(HingeLossParameter_Norm_L2);
-  Caffe::set_mode(Caffe::CPU);
-  HingeLossLayer<TypeParam> layer(layer_param);
+  HingeLossLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 2e-3, 1701);
+  GradientChecker<Dtype> checker(1e-2, 2e-3, 1701);
   checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_), 0, -1, -1);
 }
 
 
-TYPED_TEST(HingeLossLayerTest, TestGradientL2GPU) {
-  LayerParameter layer_param;
-  // Set norm to L2
-  HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param();
-  hinge_loss_param->set_norm(HingeLossParameter_Norm_L2);
-  Caffe::set_mode(Caffe::GPU);
-  HingeLossLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 2e-3, 1701);
-  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), 0, -1, -1);
-}
-
 }  // namespace caffe
index f49a5b7..bd4404a 100644 (file)
@@ -65,8 +65,7 @@ class Im2colKernelTest : public ::testing::Test {
   int width_col_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(Im2colKernelTest, Dtypes);
+TYPED_TEST_CASE(Im2colKernelTest, TestDtypes);
 
 TYPED_TEST(Im2colKernelTest, TestGPU) {
   Caffe::set_mode(Caffe::GPU);
index 7f677ca..5be1917 100644 (file)
@@ -17,8 +17,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class Im2colLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class Im2colLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   Im2colLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
@@ -37,16 +38,16 @@ class Im2colLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(Im2colLayerTest, Dtypes);
+TYPED_TEST_CASE(Im2colLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(Im2colLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
   convolution_param->set_kernel_size(3);
   convolution_param->set_stride(2);
-  Im2colLayer<TypeParam> layer(layer_param);
+  Im2colLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 27);
@@ -54,31 +55,14 @@ TYPED_TEST(Im2colLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_->width(), 2);
 }
 
-TYPED_TEST(Im2colLayerTest, TestCPU) {
+TYPED_TEST(Im2colLayerTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
   convolution_param->set_kernel_size(3);
   convolution_param->set_stride(2);
-  Im2colLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // We are lazy and will only check the top left block
-  for (int c = 0; c < 27; ++c) {
-    EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
-        this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3));
-  }
-}
-
-TYPED_TEST(Im2colLayerTest, TestGPU) {
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  Im2colLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
+  Im2colLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // We are lazy and will only check the top left block
@@ -88,28 +72,15 @@ TYPED_TEST(Im2colLayerTest, TestGPU) {
   }
 }
 
-TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
-  LayerParameter layer_param;
-  ConvolutionParameter* convolution_param =
-      layer_param.mutable_convolution_param();
-  convolution_param->set_kernel_size(3);
-  convolution_param->set_stride(2);
-  Caffe::set_mode(Caffe::CPU);
-  Im2colLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
+TYPED_TEST(Im2colLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ConvolutionParameter* convolution_param =
       layer_param.mutable_convolution_param();
   convolution_param->set_kernel_size(3);
   convolution_param->set_stride(2);
-  Caffe::set_mode(Caffe::GPU);
-  Im2colLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  Im2colLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index 2278f4e..fbd4a1c 100644 (file)
@@ -23,8 +23,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class ImageDataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ImageDataLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   ImageDataLayerTest()
       : seed_(1701),
@@ -57,16 +59,16 @@ class ImageDataLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ImageDataLayerTest, Dtypes);
+TYPED_TEST_CASE(ImageDataLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(ImageDataLayerTest, TestRead) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter param;
   ImageDataParameter* image_data_param = param.mutable_image_data_param();
   image_data_param->set_batch_size(5);
   image_data_param->set_source(this->filename_->c_str());
   image_data_param->set_shuffle(false);
-  ImageDataLayer<TypeParam> layer(param);
+  ImageDataLayer<Dtype> layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_data_->num(), 5);
   EXPECT_EQ(this->blob_top_data_->channels(), 3);
@@ -86,6 +88,7 @@ TYPED_TEST(ImageDataLayerTest, TestRead) {
 }
 
 TYPED_TEST(ImageDataLayerTest, TestResize) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter param;
   ImageDataParameter* image_data_param = param.mutable_image_data_param();
   image_data_param->set_batch_size(5);
@@ -93,7 +96,7 @@ TYPED_TEST(ImageDataLayerTest, TestResize) {
   image_data_param->set_new_height(256);
   image_data_param->set_new_width(256);
   image_data_param->set_shuffle(false);
-  ImageDataLayer<TypeParam> layer(param);
+  ImageDataLayer<Dtype> layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_data_->num(), 5);
   EXPECT_EQ(this->blob_top_data_->channels(), 3);
@@ -113,12 +116,13 @@ TYPED_TEST(ImageDataLayerTest, TestResize) {
 }
 
 TYPED_TEST(ImageDataLayerTest, TestShuffle) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter param;
   ImageDataParameter* image_data_param = param.mutable_image_data_param();
   image_data_param->set_batch_size(5);
   image_data_param->set_source(this->filename_->c_str());
   image_data_param->set_shuffle(true);
-  ImageDataLayer<TypeParam> layer(param);
+  ImageDataLayer<Dtype> layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_data_->num(), 5);
   EXPECT_EQ(this->blob_top_data_->channels(), 3);
@@ -131,14 +135,14 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) {
   // Go through the data twice
   for (int iter = 0; iter < 2; ++iter) {
     layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
-    map<TypeParam, int> values_to_indices;
+    map<Dtype, int> values_to_indices;
     int num_in_order = 0;
     for (int i = 0; i < 5; ++i) {
-      TypeParam value = this->blob_top_label_->cpu_data()[i];
+      Dtype value = this->blob_top_label_->cpu_data()[i];
       // Check that the value has not been seen already (no duplicates).
       EXPECT_EQ(values_to_indices.find(value), values_to_indices.end());
       values_to_indices[value] = i;
-      num_in_order += (value == TypeParam(i));
+      num_in_order += (value == Dtype(i));
     }
     EXPECT_EQ(5, values_to_indices.size());
     EXPECT_GT(5, num_in_order);
index 91917df..ad4783f 100644 (file)
@@ -17,8 +17,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class InnerProductLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class InnerProductLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   InnerProductLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
@@ -37,16 +38,16 @@ class InnerProductLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(InnerProductLayerTest, Dtypes);
+TYPED_TEST_CASE(InnerProductLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(InnerProductLayerTest, TestSetUp) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   InnerProductParameter* inner_product_param =
       layer_param.mutable_inner_product_param();
   inner_product_param->set_num_output(10);
-  shared_ptr<InnerProductLayer<TypeParam> > layer(
-      new InnerProductLayer<TypeParam>(layer_param));
+  shared_ptr<InnerProductLayer<Dtype> > layer(
+      new InnerProductLayer<Dtype>(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->height(), 1);
@@ -54,43 +55,23 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) {
   EXPECT_EQ(this->blob_top_->channels(), 10);
 }
 
-TYPED_TEST(InnerProductLayerTest, TestCPU) {
-  LayerParameter layer_param;
-  InnerProductParameter* inner_product_param =
-      layer_param.mutable_inner_product_param();
-  Caffe::set_mode(Caffe::CPU);
-  inner_product_param->set_num_output(10);
-  inner_product_param->mutable_weight_filler()->set_type("uniform");
-  inner_product_param->mutable_bias_filler()->set_type("uniform");
-  inner_product_param->mutable_bias_filler()->set_min(1);
-  inner_product_param->mutable_bias_filler()->set_max(2);
-  shared_ptr<InnerProductLayer<TypeParam> > layer(
-      new InnerProductLayer<TypeParam>(layer_param));
-  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* data = this->blob_top_->cpu_data();
-  const int count = this->blob_top_->count();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_GE(data[i], 1.);
-  }
-}
-
-TYPED_TEST(InnerProductLayerTest, TestGPU) {
-  if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
+TYPED_TEST(InnerProductLayerTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
+  if (Caffe::mode() == Caffe::CPU ||
+      sizeof(Dtype) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
     LayerParameter layer_param;
     InnerProductParameter* inner_product_param =
         layer_param.mutable_inner_product_param();
-    Caffe::set_mode(Caffe::GPU);
     inner_product_param->set_num_output(10);
     inner_product_param->mutable_weight_filler()->set_type("uniform");
     inner_product_param->mutable_bias_filler()->set_type("uniform");
     inner_product_param->mutable_bias_filler()->set_min(1);
     inner_product_param->mutable_bias_filler()->set_max(2);
-    shared_ptr<InnerProductLayer<TypeParam> > layer(
-      new InnerProductLayer<TypeParam>(layer_param));
+    shared_ptr<InnerProductLayer<Dtype> > layer(
+        new InnerProductLayer<Dtype>(layer_param));
     layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
     layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-    const TypeParam* data = this->blob_top_->cpu_data();
+    const Dtype* data = this->blob_top_->cpu_data();
     const int count = this->blob_top_->count();
     for (int i = 0; i < count; ++i) {
       EXPECT_GE(data[i], 1.);
@@ -100,34 +81,21 @@ TYPED_TEST(InnerProductLayerTest, TestGPU) {
   }
 }
 
-TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
-  LayerParameter layer_param;
-  InnerProductParameter* inner_product_param =
-      layer_param.mutable_inner_product_param();
-  Caffe::set_mode(Caffe::CPU);
-  inner_product_param->set_num_output(10);
-  inner_product_param->mutable_weight_filler()->set_type("gaussian");
-  inner_product_param->mutable_bias_filler()->set_type("gaussian");
-  inner_product_param->mutable_bias_filler()->set_min(1);
-  inner_product_param->mutable_bias_filler()->set_max(2);
-  InnerProductLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
-  if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
+TYPED_TEST(InnerProductLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  if (Caffe::mode() == Caffe::CPU ||
+      sizeof(Dtype) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
     LayerParameter layer_param;
     InnerProductParameter* inner_product_param =
         layer_param.mutable_inner_product_param();
-    Caffe::set_mode(Caffe::GPU);
     inner_product_param->set_num_output(10);
     inner_product_param->mutable_weight_filler()->set_type("gaussian");
     inner_product_param->mutable_bias_filler()->set_type("gaussian");
-    InnerProductLayer<TypeParam> layer(layer_param);
-    GradientChecker<TypeParam> checker(1e-2, 1e-2);
-    checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+    inner_product_param->mutable_bias_filler()->set_min(1);
+    inner_product_param->mutable_bias_filler()->set_max(2);
+    InnerProductLayer<Dtype> layer(layer_param);
+    GradientChecker<Dtype> checker(1e-2, 1e-3);
+    checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
         &(this->blob_top_vec_));
   } else {
     LOG(ERROR) << "Skipping test due to old architecture.";
index 6996a23..a627c97 100644 (file)
@@ -21,8 +21,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class LRNLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class LRNLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   LRNLayerTest()
       : epsilon_(Dtype(1e-5)),
@@ -49,10 +51,11 @@ class LRNLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-template <typename Dtype>
-void LRNLayerTest<Dtype>::ReferenceLRNForward(
+template <typename TypeParam>
+void LRNLayerTest<TypeParam>::ReferenceLRNForward(
     const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
     Blob<Dtype>* blob_top) {
+  typedef typename TypeParam::Dtype Dtype;
   blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
       blob_bottom.height(), blob_bottom.width());
   Dtype* top_data = blob_top->mutable_cpu_data();
@@ -111,12 +114,12 @@ void LRNLayerTest<Dtype>::ReferenceLRNForward(
   }
 }
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(LRNLayerTest, Dtypes);
+TYPED_TEST_CASE(LRNLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  LRNLayer<TypeParam> layer(layer_param);
+  LRNLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 7);
@@ -124,28 +127,13 @@ TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
   EXPECT_EQ(this->blob_top_->width(), 3);
 }
 
-TYPED_TEST(LRNLayerTest, TestCPUForwardAcrossChannels) {
-  LayerParameter layer_param;
-  LRNLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Blob<TypeParam> top_reference;
-  this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
-      &top_reference);
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i],
-                this->epsilon_);
-  }
-}
-
-TYPED_TEST(LRNLayerTest, TestGPUForwardAcrossChannels) {
+TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  LRNLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
+  LRNLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Blob<TypeParam> top_reference;
+  Blob<Dtype> top_reference;
   this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
       &top_reference);
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
@@ -154,11 +142,11 @@ TYPED_TEST(LRNLayerTest, TestGPUForwardAcrossChannels) {
   }
 }
 
-TYPED_TEST(LRNLayerTest, TestCPUGradientAcrossChannels) {
+TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  LRNLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  Caffe::set_mode(Caffe::CPU);
+  LRNLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int i = 0; i < this->blob_top_->count(); ++i) {
@@ -175,33 +163,13 @@ TYPED_TEST(LRNLayerTest, TestCPUGradientAcrossChannels) {
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(LRNLayerTest, TestGPUGradientAcrossChannels) {
-  LayerParameter layer_param;
-  LRNLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  Caffe::set_mode(Caffe::GPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    this->blob_top_->mutable_cpu_diff()[i] = 1.;
-  }
-  vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
-  layer.Backward(this->blob_top_vec_, propagate_down,
-                 &(this->blob_bottom_vec_));
-  // for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-  //   std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i]
-  //       << std::endl;
-  // }
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
 TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   layer_param.mutable_lrn_param()->set_norm_region(
       LRNParameter_NormRegion_WITHIN_CHANNEL);
   layer_param.mutable_lrn_param()->set_local_size(3);
-  LRNLayer<TypeParam> layer(layer_param);
+  LRNLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
   EXPECT_EQ(this->blob_top_->channels(), 7);
@@ -209,16 +177,16 @@ TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
   EXPECT_EQ(this->blob_top_->width(), 3);
 }
 
-TYPED_TEST(LRNLayerTest, TestCPUForwardWithinChannel) {
+TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   layer_param.mutable_lrn_param()->set_norm_region(
       LRNParameter_NormRegion_WITHIN_CHANNEL);
   layer_param.mutable_lrn_param()->set_local_size(3);
-  LRNLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
+  LRNLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Blob<TypeParam> top_reference;
+  Blob<Dtype> top_reference;
   this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
       &top_reference);
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
@@ -227,49 +195,14 @@ TYPED_TEST(LRNLayerTest, TestCPUForwardWithinChannel) {
   }
 }
 
-TYPED_TEST(LRNLayerTest, TestGPUForwardWithinChannel) {
-  LayerParameter layer_param;
-  layer_param.mutable_lrn_param()->set_norm_region(
-      LRNParameter_NormRegion_WITHIN_CHANNEL);
-  layer_param.mutable_lrn_param()->set_local_size(3);
-  LRNLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  Blob<TypeParam> top_reference;
-  this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
-      &top_reference);
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i],
-                this->epsilon_);
-  }
-}
-
-TYPED_TEST(LRNLayerTest, TestCPUGradientWithinChannel) {
-  LayerParameter layer_param;
-  layer_param.mutable_lrn_param()->set_norm_region(
-      LRNParameter_NormRegion_WITHIN_CHANNEL);
-  layer_param.mutable_lrn_param()->set_local_size(3);
-  LRNLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  Caffe::set_mode(Caffe::CPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    this->blob_top_->mutable_cpu_diff()[i] = 1.;
-  }
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(LRNLayerTest, TestGPUGradientWithinChannel) {
+TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   layer_param.mutable_lrn_param()->set_norm_region(
       LRNParameter_NormRegion_WITHIN_CHANNEL);
   layer_param.mutable_lrn_param()->set_local_size(3);
-  LRNLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  Caffe::set_mode(Caffe::GPU);
+  LRNLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int i = 0; i < this->blob_top_->count(); ++i) {
index ddb9f06..2a70c9c 100644 (file)
@@ -65,8 +65,7 @@ class MathFunctionsTest : public ::testing::Test {
   Blob<Dtype>* const blob_top_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MathFunctionsTest, Dtypes);
+TYPED_TEST_CASE(MathFunctionsTest, TestDtypes);
 
 TYPED_TEST(MathFunctionsTest, TestNothing) {
   // The first test case of a test suite takes the longest time
index ac6f86f..eef375a 100644 (file)
@@ -16,8 +16,9 @@
 
 namespace caffe {
 
-template <typename Dtype>
-class MaxPoolingDropoutTest : public ::testing::Test {
+template <typename TypeParam>
+class MaxPoolingDropoutTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   MaxPoolingDropoutTest()
       : blob_bottom_(new Blob<Dtype>()),
@@ -40,17 +41,17 @@ class MaxPoolingDropoutTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MaxPoolingDropoutTest, Dtypes);
+TYPED_TEST_CASE(MaxPoolingDropoutTest, TestDtypesAndDevices);
 
 TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> max_layer(layer_param);
+  PoolingLayer<Dtype> max_layer(layer_param);
   max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  DropoutLayer<TypeParam> dropout_layer(layer_param);
+  DropoutLayer<Dtype> dropout_layer(layer_param);
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
@@ -59,27 +60,27 @@ TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
 }
 
 
-TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(MaxPoolingDropoutTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  TypeParam sum = 0.;
+  const Dtype* top_data = this->blob_top_->cpu_data();
+  Dtype sum = 0.;
   for (int i = 0; i < this->blob_top_->count(); ++i) {
     sum += top_data[i];
   }
   EXPECT_EQ(sum, this->blob_top_->count());
   // Dropout in-place
-  DropoutLayer<TypeParam> dropout_layer(layer_param);
+  DropoutLayer<Dtype> dropout_layer(layer_param);
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
   sum = 0.;
-  TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
+  Dtype scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
     sum += top_data[i];
@@ -88,81 +89,14 @@ TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
   EXPECT_LE(sum, this->blob_top_->count()*scale);
 }
 
-TYPED_TEST(MaxPoolingDropoutTest, GPUForward) {
-  Caffe::set_mode(Caffe::GPU);
-  LayerParameter layer_param;
-  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-  pooling_param->set_kernel_size(3);
-  pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  TypeParam sum = 0.;
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    sum += top_data[i];
-  }
-  EXPECT_EQ(sum, this->blob_top_->count());
-
-  DropoutLayer<TypeParam> dropout_layer(layer_param);
-  dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
-  dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
-  sum = 0.;
-  TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
-  top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    sum += top_data[i];
-  }
-  EXPECT_GE(sum, 0);
-  EXPECT_LE(sum, this->blob_top_->count()*scale);
-}
-
-TYPED_TEST(MaxPoolingDropoutTest, CPUBackward) {
-  Caffe::set_mode(Caffe::CPU);
-  Caffe::set_phase(Caffe::TRAIN);
-  LayerParameter layer_param;
-  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-  pooling_param->set_kernel_size(3);
-  pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    this->blob_top_->mutable_cpu_diff()[i] = 1.;
-  }
-  vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
-  layer.Backward(this->blob_top_vec_, propagate_down,
-                 &(this->blob_bottom_vec_));
-  const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff();
-  TypeParam sum = 0.;
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    sum += bottom_diff[i];
-  }
-  EXPECT_EQ(sum, this->blob_top_->count());
-  // Dropout in-place
-  DropoutLayer<TypeParam> dropout_layer(layer_param);
-  dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
-  dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
-  dropout_layer.Backward(this->blob_top_vec_, propagate_down,
-                         &(this->blob_top_vec_));
-  layer.Backward(this->blob_top_vec_, propagate_down,
-                 &(this->blob_bottom_vec_));
-  TypeParam sum_with_dropout = 0.;
-  bottom_diff = this->blob_bottom_->cpu_diff();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    sum_with_dropout += bottom_diff[i];
-  }
-  EXPECT_GE(sum_with_dropout, sum);
-}
-
-TYPED_TEST(MaxPoolingDropoutTest, GPUBackward) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
+  typedef typename TypeParam::Dtype Dtype;
   Caffe::set_phase(Caffe::TRAIN);
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int i = 0; i < this->blob_top_->count(); ++i) {
@@ -171,21 +105,21 @@ TYPED_TEST(MaxPoolingDropoutTest, GPUBackward) {
   vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
   layer.Backward(this->blob_top_vec_, propagate_down,
                  &(this->blob_bottom_vec_));
-  const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff();
-  TypeParam sum = 0.;
+  const Dtype* bottom_diff = this->blob_bottom_->cpu_diff();
+  Dtype sum = 0.;
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     sum += bottom_diff[i];
   }
   EXPECT_EQ(sum, this->blob_top_->count());
   // Dropout in-place
-  DropoutLayer<TypeParam> dropout_layer(layer_param);
+  DropoutLayer<Dtype> dropout_layer(layer_param);
   dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
   dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
   dropout_layer.Backward(this->blob_top_vec_, propagate_down,
                          &(this->blob_top_vec_));
   layer.Backward(this->blob_top_vec_, propagate_down,
                  &(this->blob_bottom_vec_));
-  TypeParam sum_with_dropout = 0.;
+  Dtype sum_with_dropout = 0.;
   bottom_diff = this->blob_bottom_->cpu_diff();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     sum_with_dropout += bottom_diff[i];
index 9781439..c9d5889 100644 (file)
@@ -55,8 +55,7 @@ class MemoryDataLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MemoryDataLayerTest, Dtypes);
+TYPED_TEST_CASE(MemoryDataLayerTest, TestDtypes);
 
 TYPED_TEST(MemoryDataLayerTest, TestSetup) {
   LayerParameter layer_param;
index aa475ca..d73347e 100644 (file)
@@ -46,8 +46,7 @@ class MultinomialLogisticLossLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, Dtypes);
+TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes);
 
 
 TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
index 18b0347..129d444 100644 (file)
 
 namespace caffe {
 
-template <typename Dtype>
+template <typename TypeParam>
 class NetTest : public ::testing::Test {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   NetTest() : seed_(1701) {}
 
@@ -456,8 +458,7 @@ class NetTest : public ::testing::Test {
   shared_ptr<Net<Dtype> > net_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(NetTest, Dtypes);
+TYPED_TEST_CASE(NetTest, TestDtypesAndDevices);
 
 TYPED_TEST(NetTest, TestHasBlob) {
   this->InitTinyNet();
@@ -507,6 +508,7 @@ TYPED_TEST(NetTest, TestBottomNeedBackward) {
 }
 
 TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
+  typedef typename TypeParam::Dtype Dtype;
   const bool force_backward = true;
   this->InitTinyNet(force_backward);
   const vector<vector<bool> >& bottom_need_backward =
@@ -521,6 +523,7 @@ TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
 }
 
 TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) {
+  typedef typename TypeParam::Dtype Dtype;
   const bool force_backward = true;
   this->InitTinyNetEuclidean(force_backward);
   const vector<vector<bool> >& bottom_need_backward =
@@ -553,32 +556,35 @@ TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) {
 }
 
 TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
+  typedef typename TypeParam::Dtype Dtype;
   this->InitUnsharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
-  TypeParam loss;
+  vector<Blob<Dtype>*> bottom;
+  Dtype loss;
   this->net_->Forward(bottom, &loss);
   EXPECT_GT(loss, 0);
 }
 
 TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
+  typedef typename TypeParam::Dtype Dtype;
   this->InitSharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
-  TypeParam loss;
+  vector<Blob<Dtype>*> bottom;
+  Dtype loss;
   this->net_->Forward(bottom, &loss);
   EXPECT_FLOAT_EQ(loss, 0);
 }
 
 TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
+  typedef typename TypeParam::Dtype Dtype;
   this->InitUnsharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
-  Net<TypeParam>* net = this->net_.get();
+  vector<Blob<Dtype>*> bottom;
+  Net<Dtype>* net = this->net_.get();
   net->Forward(bottom);
   net->Backward();
-  Layer<TypeParam>* ip1_layer = net->layer_by_name("innerproduct1").get();
-  Layer<TypeParam>* ip2_layer = net->layer_by_name("innerproduct2").get();
+  Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
+  Layer<Dtype>* ip2_layer = net->layer_by_name("innerproduct2").get();
   const int count = ip1_layer->blobs()[0]->count();
-  const TypeParam* grad1 = ip1_layer->blobs()[0]->cpu_diff();
-  const TypeParam* grad2 = ip2_layer->blobs()[0]->cpu_diff();
+  const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff();
+  const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff();
   for (int i = 0; i < count; ++i) {
     EXPECT_GT(fabs(grad1[i]), 0);
     EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]);
@@ -586,120 +592,34 @@ TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
 }
 
 TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
+  typedef typename TypeParam::Dtype Dtype;
   this->InitSharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
-  Net<TypeParam>* net = this->net_.get();
-  TypeParam loss;
+  vector<Blob<Dtype>*> bottom;
+  Net<Dtype>* net = this->net_.get();
+  Dtype loss;
   net->Forward(bottom, &loss);
   net->Backward();
   EXPECT_FLOAT_EQ(loss, 0);
-  Layer<TypeParam>* ip1_layer = net->layer_by_name("innerproduct1").get();
-  Layer<TypeParam>* ip2_layer = net->layer_by_name("innerproduct2").get();
+  Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
+  Layer<Dtype>* ip2_layer = net->layer_by_name("innerproduct2").get();
   const int count = ip1_layer->blobs()[0]->count();
-  const TypeParam* grad1 = ip1_layer->blobs()[0]->cpu_diff();
-  const TypeParam* grad2 = ip2_layer->blobs()[0]->cpu_diff();
+  const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff();
+  const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff();
   for (int i = 0; i < count; ++i) {
     EXPECT_FLOAT_EQ(0, grad1[i]);
     EXPECT_FLOAT_EQ(0, grad2[i]);
   }
 }
 
-TYPED_TEST(NetTest, TestSharedWeightsUpdateCPU) {
-  Caffe::set_random_seed(this->seed_);
-  Caffe::set_mode(Caffe::CPU);
-  this->InitDiffDataSharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
-  EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
-  EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
-  Blob<TypeParam>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
-  Blob<TypeParam>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
-  // Check that data blobs of shared weights share the same location in memory.
-  EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
-  // Check that diff blobs of shared weights are at different locations in
-  // locations.  (The diffs should be accumulated at update time.)
-  EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
-  this->net_->Forward(bottom);
-  this->net_->Backward();
-  // Compute the expected update as the data minus the two diffs.
-  Blob<TypeParam> shared_params;
-  const bool reshape = true;
-  const bool copy_diff = false;
-  shared_params.CopyFrom(*ip1_weights, copy_diff, reshape);
-  shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape);
-  const int count = ip1_weights->count();
-  // Make sure the diffs are non-trivial.
-  for (int i = 0; i < count; ++i) {
-    EXPECT_NE(0, ip1_weights->cpu_diff()[i]);
-    EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
-    EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
-  }
-  caffe_axpy(count, TypeParam(1), ip2_weights->cpu_diff(),
-             shared_params.mutable_cpu_diff());
-  caffe_axpy(count, TypeParam(-1), shared_params.cpu_diff(),
-             shared_params.mutable_cpu_data());
-  const TypeParam* expected_updated_params = shared_params.cpu_data();
-  this->net_->Update();
-  const TypeParam* actual_updated_params = ip1_weights->cpu_data();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]);
-  }
-  // Check that data blobs of shared weights STILL point to the same memory
-  // location (because ... who knows).
-  EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
-
-  Caffe::set_random_seed(this->seed_);
-  this->InitDiffDataUnsharedWeightsNet();
-  EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
-  EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
-  ip1_weights = this->net_->layers()[1]->blobs()[0].get();
-  ip2_weights = this->net_->layers()[2]->blobs()[0].get();
-  // Check that data and diff blobs of unshared weights are at different
-  // locations in memory.
-  EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data());
-  EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
-  this->net_->Forward(bottom);
-  this->net_->Backward();
-  // Compute the expected update.
-  Blob<TypeParam> unshared_params1;
-  unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape);
-  unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape);
-  Blob<TypeParam> unshared_params2;
-  unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape);
-  unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape);
-  // Make sure the diffs are non-trivial and sum to the diff in the shared net.
-  for (int i = 0; i < count; ++i) {
-    EXPECT_NE(0, ip1_weights->cpu_diff()[i]);
-    EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
-    EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
-    EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i],
-              shared_params.cpu_diff()[i]);
-  }
-  caffe_axpy(count, TypeParam(-1), ip1_weights->cpu_diff(),
-             unshared_params1.mutable_cpu_data());
-  caffe_axpy(count, TypeParam(-1), ip2_weights->cpu_diff(),
-             unshared_params2.mutable_cpu_data());
-  const TypeParam* expected_updated_params1 = unshared_params1.cpu_data();
-  const TypeParam* expected_updated_params2 = unshared_params2.cpu_data();
-  this->net_->Update();
-  const TypeParam* actual_updated_params1 = ip1_weights->cpu_data();
-  const TypeParam* actual_updated_params2 = ip2_weights->cpu_data();
-  for (int i = 0; i < count; ++i) {
-    EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]);
-    EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]);
-    EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]);
-    EXPECT_NE(expected_updated_params, expected_updated_params1);
-  }
-}
-
-TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
+TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
+  typedef typename TypeParam::Dtype Dtype;
   Caffe::set_random_seed(this->seed_);
-  Caffe::set_mode(Caffe::GPU);
   this->InitDiffDataSharedWeightsNet();
-  vector<Blob<TypeParam>*> bottom;
+  vector<Blob<Dtype>*> bottom;
   EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
   EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
-  Blob<TypeParam>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
-  Blob<TypeParam>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
+  Blob<Dtype>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
+  Blob<Dtype>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
   // Check that data blobs of shared weights share the same location in memory.
   EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
   // Check that diff blobs of shared weights are at different locations in
@@ -708,7 +628,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
   this->net_->Forward(bottom);
   this->net_->Backward();
   // Compute the expected update as the data minus the two diffs.
-  Blob<TypeParam> shared_params;
+  Blob<Dtype> shared_params;
   const bool reshape = true;
   const bool copy_diff = false;
   shared_params.CopyFrom(*ip1_weights, copy_diff, reshape);
@@ -720,13 +640,13 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
     EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
     EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
   }
-  caffe_axpy(count, TypeParam(1), ip2_weights->cpu_diff(),
+  caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(),
              shared_params.mutable_cpu_diff());
-  caffe_axpy(count, TypeParam(-1), shared_params.cpu_diff(),
+  caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(),
              shared_params.mutable_cpu_data());
-  const TypeParam* expected_updated_params = shared_params.cpu_data();
+  const Dtype* expected_updated_params = shared_params.cpu_data();
   this->net_->Update();
-  const TypeParam* actual_updated_params = ip1_weights->cpu_data();
+  const Dtype* actual_updated_params = ip1_weights->cpu_data();
   for (int i = 0; i < count; ++i) {
     EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]);
   }
@@ -747,10 +667,10 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
   this->net_->Forward(bottom);
   this->net_->Backward();
   // Compute the expected update.
-  Blob<TypeParam> unshared_params1;
+  Blob<Dtype> unshared_params1;
   unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape);
   unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape);
-  Blob<TypeParam> unshared_params2;
+  Blob<Dtype> unshared_params2;
   unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape);
   unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape);
   // Make sure the diffs are non-trivial and sum to the diff in the shared net.
@@ -761,15 +681,15 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
     EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i],
               shared_params.cpu_diff()[i]);
   }
-  caffe_axpy(count, TypeParam(-1), ip1_weights->cpu_diff(),
+  caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(),
              unshared_params1.mutable_cpu_data());
-  caffe_axpy(count, TypeParam(-1), ip2_weights->cpu_diff(),
+  caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(),
              unshared_params2.mutable_cpu_data());
-  const TypeParam* expected_updated_params1 = unshared_params1.cpu_data();
-  const TypeParam* expected_updated_params2 = unshared_params2.cpu_data();
+  const Dtype* expected_updated_params1 = unshared_params1.cpu_data();
+  const Dtype* expected_updated_params2 = unshared_params2.cpu_data();
   this->net_->Update();
-  const TypeParam* actual_updated_params1 = ip1_weights->cpu_data();
-  const TypeParam* actual_updated_params2 = ip2_weights->cpu_data();
+  const Dtype* actual_updated_params1 = ip1_weights->cpu_data();
+  const Dtype* actual_updated_params2 = ip2_weights->cpu_data();
   for (int i = 0; i < count; ++i) {
     EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]);
     EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]);
index a439128..f444718 100644 (file)
@@ -17,8 +17,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class NeuronLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class NeuronLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   NeuronLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
@@ -38,97 +39,41 @@ class NeuronLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(NeuronLayerTest, Dtypes);
+TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(NeuronLayerTest, TestReLUCPU) {
+TYPED_TEST(NeuronLayerTest, TestReLU) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  ReLULayer<TypeParam> layer(layer_param);
+  ReLULayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     EXPECT_GE(top_data[i], 0.);
     EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
   }
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  ReLULayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestReLUGPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  ReLULayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_GE(top_data[i], 0.);
-    EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
-  }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  ReLULayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) {
+TYPED_TEST(NeuronLayerTest, TestReLUGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SigmoidLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
-    // check that we squashed the value between 0 and 1
-    EXPECT_GE(top_data[i], 0.);
-    EXPECT_LE(top_data[i], 1.);
-  }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SigmoidLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+  ReLULayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
+TYPED_TEST(NeuronLayerTest, TestSigmoid) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SigmoidLayer<TypeParam> layer(layer_param);
+  SigmoidLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
     // check that we squashed the value between 0 and 1
@@ -137,28 +82,25 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
   }
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) {
+TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SigmoidLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+  SigmoidLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-
-
-TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
+TYPED_TEST(NeuronLayerTest, TestDropout) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
   Caffe::set_phase(Caffe::TRAIN);
-  DropoutLayer<TypeParam> layer(layer_param);
+  DropoutLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
@@ -167,37 +109,16 @@ TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
   }
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  Caffe::set_phase(Caffe::TRAIN);
-  DropoutLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPUTest) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  Caffe::set_phase(Caffe::TEST);
-  DropoutLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
+TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
   Caffe::set_phase(Caffe::TEST);
-  DropoutLayer<TypeParam> layer(layer_param);
+  DropoutLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
       EXPECT_EQ(top_data[i], bottom_data[i]);
@@ -205,116 +126,46 @@ TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
   }
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGPU) {
+TYPED_TEST(NeuronLayerTest, TestDropoutGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TRAIN);
-  DropoutLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    if (top_data[i] != 0) {
-      EXPECT_EQ(top_data[i], bottom_data[i] * scale);
-    }
-  }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPU) {
-    LayerParameter layer_param;
-    Caffe::set_mode(Caffe::GPU);
-    Caffe::set_phase(Caffe::TRAIN);
-    DropoutLayer<TypeParam> layer(layer_param);
-    GradientChecker<TypeParam> checker(1e-2, 1e-3);
-    // it is too expensive to call curand multiple times, so we don't do an
-    // exhaustive gradient check.
-    checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
-        &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPUTest) {
-    LayerParameter layer_param;
-    Caffe::set_mode(Caffe::GPU);
-    Caffe::set_phase(Caffe::TEST);
-    DropoutLayer<TypeParam> layer(layer_param);
-    GradientChecker<TypeParam> checker(1e-2, 1e-3);
-    // it is too expensive to call curand multiple times, so we don't do an
-    // exhaustive gradient check.
-    checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
-        &(this->blob_top_vec_));
+  DropoutLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
+  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
+TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TEST);
-  DropoutLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    if (top_data[i] != 0) {
-      EXPECT_EQ(top_data[i], bottom_data[i]);
-    }
-  }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestBNLLCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  BNLLLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_GE(top_data[i], 0.);
-    EXPECT_GE(top_data[i], bottom_data[i]);
-  }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  BNLLLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  DropoutLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGPU) {
+TYPED_TEST(NeuronLayerTest, TestBNLL) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  BNLLLayer<TypeParam> layer(layer_param);
+  BNLLLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     EXPECT_GE(top_data[i], 0.);
     EXPECT_GE(top_data[i], bottom_data[i]);
   }
 }
 
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) {
+TYPED_TEST(NeuronLayerTest, TestBNLLGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  BNLLLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  BNLLLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index fdc3ea4..b209d82 100644 (file)
@@ -17,8 +17,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class PoolingLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class PoolingLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   PoolingLayerTest()
       : blob_bottom_(new Blob<Dtype>()),
@@ -370,15 +372,15 @@ class PoolingLayerTest : public ::testing::Test {
   }
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(PoolingLayerTest, Dtypes);
+TYPED_TEST_CASE(PoolingLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(PoolingLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
@@ -387,13 +389,14 @@ TYPED_TEST(PoolingLayerTest, TestSetup) {
 }
 
 TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
   pooling_param->set_pad(1);
   pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
@@ -402,41 +405,11 @@ TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
 }
 
 /*
-TYPED_TEST(PoolingLayerTest, PrintGPUBackward) {
-  LayerParameter layer_param;
-  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-  pooling_param->set_kernel_size(3);
-  pooling_param->set_stride(2);
-  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-  Caffe::set_mode(Caffe::GPU);
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl;
-  }
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl;
-  }
-
-  for (int i = 0; i < this->blob_top_->count(); ++i) {
-    this->blob_top_->mutable_cpu_diff()[i] = 1.;
-  }
-  layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl;
-  }
-}
-*/
-
-/*
-TYPED_TEST(PoolingLayerTest, PrintCPUBackward) {
+TYPED_TEST(PoolingLayerTest, PrintBackward) {
   LayerParameter layer_param;
   layer_param.set_kernelsize(3);
   layer_param.set_stride(2);
   layer_param.set_pool(LayerParameter_PoolMethod_MAX);
-  Caffe::set_mode(Caffe::CPU);
   PoolingLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -457,56 +430,21 @@ TYPED_TEST(PoolingLayerTest, PrintCPUBackward) {
 }
 */
 
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMax) {
-  Caffe::set_mode(Caffe::CPU);
-  this->TestForwardSquare();
-  this->TestForwardRectHigh();
-  this->TestForwardRectWide();
-}
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMax) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(PoolingLayerTest, TestForwardMax) {
   this->TestForwardSquare();
   this->TestForwardRectHigh();
   this->TestForwardRectWide();
 }
 
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxTopMask) {
-  Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) {
   this->blob_top_vec_.push_back(this->blob_top_mask_);
   this->TestForwardSquare();
   this->TestForwardRectHigh();
   this->TestForwardRectWide();
 }
 
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxTopMask) {
-  Caffe::set_mode(Caffe::GPU);
-  this->blob_top_vec_.push_back(this->blob_top_mask_);
-  this->TestForwardSquare();
-  this->TestForwardRectHigh();
-  this->TestForwardRectWide();
-}
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
-  for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
-    for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
-      LayerParameter layer_param;
-      PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-      pooling_param->set_kernel_h(kernel_h);
-      pooling_param->set_kernel_w(kernel_w);
-      pooling_param->set_stride(2);
-      pooling_param->set_pad(1);
-      pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-      Caffe::set_mode(Caffe::CPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-4, 1e-2);
-      checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-          &(this->blob_top_vec_));
-    }
-  }
-}
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
+TYPED_TEST(PoolingLayerTest, TestGradientMax) {
+  typedef typename TypeParam::Dtype Dtype;
   for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
     for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
       LayerParameter layer_param;
@@ -516,23 +454,22 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
       pooling_param->set_stride(2);
       pooling_param->set_pad(1);
       pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-      Caffe::set_mode(Caffe::GPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-4, 1e-2);
+      PoolingLayer<Dtype> layer(layer_param);
+      GradientChecker<Dtype> checker(1e-4, 1e-2);
       checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
           &(this->blob_top_vec_));
     }
   }
 }
 
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxPadded) {
+TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
   pooling_param->set_pad(2);
   pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-  Caffe::set_mode(Caffe::CPU);
   this->blob_bottom_->Reshape(1, 1, 3, 3);
   // Input:
   //     [ 1 2 4 ]
@@ -547,14 +484,14 @@ TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxPadded) {
   this->blob_bottom_->mutable_cpu_data()[6] = 4;
   this->blob_bottom_->mutable_cpu_data()[7] = 2;
   this->blob_bottom_->mutable_cpu_data()[8] = 1;
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 1);
   EXPECT_EQ(this->blob_top_->channels(), 1);
   EXPECT_EQ(this->blob_top_->height(), 3);
   EXPECT_EQ(this->blob_top_->width(), 3);
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  TypeParam epsilon = 1e-8;
+  Dtype epsilon = 1e-8;
   // Output:
   //     [ 1 4 4 ]
   //     [ 4 4 4 ]
@@ -570,54 +507,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxPadded) {
   EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon);
 }
 
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxPadded) {
-  LayerParameter layer_param;
-  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-  pooling_param->set_kernel_size(3);
-  pooling_param->set_stride(2);
-  pooling_param->set_pad(2);
-  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-  Caffe::set_mode(Caffe::GPU);
-  this->blob_bottom_->Reshape(1, 1, 3, 3);
-  // Input:
-  //     [ 1 2 4 ]
-  //     [ 2 3 2 ]
-  //     [ 4 2 1 ]
-  this->blob_bottom_->mutable_cpu_data()[0] = 1;
-  this->blob_bottom_->mutable_cpu_data()[1] = 2;
-  this->blob_bottom_->mutable_cpu_data()[2] = 4;
-  this->blob_bottom_->mutable_cpu_data()[3] = 2;
-  this->blob_bottom_->mutable_cpu_data()[4] = 3;
-  this->blob_bottom_->mutable_cpu_data()[5] = 2;
-  this->blob_bottom_->mutable_cpu_data()[6] = 4;
-  this->blob_bottom_->mutable_cpu_data()[7] = 2;
-  this->blob_bottom_->mutable_cpu_data()[8] = 1;
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  EXPECT_EQ(this->blob_top_->num(), 1);
-  EXPECT_EQ(this->blob_top_->channels(), 1);
-  EXPECT_EQ(this->blob_top_->height(), 3);
-  EXPECT_EQ(this->blob_top_->width(), 3);
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  TypeParam epsilon = 1e-8;
-  // Output:
-  //     [ 1 4 4 ]
-  //     [ 4 4 4 ]
-  //     [ 4 4 1 ]
-  EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon);
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientMaxTopMask) {
+TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) {
+  typedef typename TypeParam::Dtype Dtype;
   for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
     for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
       LayerParameter layer_param;
@@ -627,9 +518,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMaxTopMask) {
       pooling_param->set_stride(2);
       pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
       this->blob_top_vec_.push_back(this->blob_top_mask_);
-      Caffe::set_mode(Caffe::CPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-4, 1e-2);
+      PoolingLayer<Dtype> layer(layer_param);
+      GradientChecker<Dtype> checker(1e-4, 1e-2);
       checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
           &(this->blob_top_vec_));
       this->blob_top_vec_.pop_back();
@@ -637,48 +527,27 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMaxTopMask) {
   }
 }
 
-TYPED_TEST(PoolingLayerTest, TestGPUGradientMaxTopMask) {
-  for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
-    for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
-      LayerParameter layer_param;
-      PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-      pooling_param->set_kernel_h(kernel_h);
-      pooling_param->set_kernel_w(kernel_w);
-      pooling_param->set_stride(2);
-      pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
-      this->blob_top_vec_.push_back(this->blob_top_mask_);
-      Caffe::set_mode(Caffe::GPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-4, 1e-2);
-      checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-          &(this->blob_top_vec_));
-      this->blob_top_vec_.pop_back();
-    }
-  }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUForwardAve) {
+TYPED_TEST(PoolingLayerTest, TestForwardAve) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(1);
   pooling_param->set_pad(1);
   pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-  Caffe::set_mode(Caffe::CPU);
   this->blob_bottom_->Reshape(1, 1, 3, 3);
   FillerParameter filler_param;
-  filler_param.set_value(TypeParam(2));
-  ConstantFiller<TypeParam> filler(filler_param);
+  filler_param.set_value(Dtype(2));
+  ConstantFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
-  PoolingLayer<TypeParam> layer(layer_param);
+  PoolingLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 1);
   EXPECT_EQ(this->blob_top_->channels(), 1);
   EXPECT_EQ(this->blob_top_->height(), 3);
   EXPECT_EQ(this->blob_top_->width(), 3);
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  TypeParam epsilon = 1e-5;
+  Dtype epsilon = 1e-5;
   EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
   EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
   EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon);
@@ -690,79 +559,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUForwardAve) {
   EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon);
 }
 
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardAve) {
-  LayerParameter layer_param;
-  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-  pooling_param->set_kernel_size(3);
-  pooling_param->set_stride(1);
-  pooling_param->set_pad(1);
-  pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-  Caffe::set_mode(Caffe::GPU);
-  this->blob_bottom_->Reshape(1, 1, 3, 3);
-  FillerParameter filler_param;
-  filler_param.set_value(TypeParam(2));
-  ConstantFiller<TypeParam> filler(filler_param);
-  filler.Fill(this->blob_bottom_);
-  PoolingLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  EXPECT_EQ(this->blob_top_->num(), 1);
-  EXPECT_EQ(this->blob_top_->channels(), 1);
-  EXPECT_EQ(this->blob_top_->height(), 3);
-  EXPECT_EQ(this->blob_top_->width(), 3);
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  TypeParam epsilon = 1e-5;
-  EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0    , epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon);
-  EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon);
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
-  for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
-    for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
-      LayerParameter layer_param;
-      PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-      pooling_param->set_kernel_h(kernel_h);
-      pooling_param->set_kernel_w(kernel_w);
-      pooling_param->set_stride(2);
-      pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-      Caffe::set_mode(Caffe::CPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-2, 1e-2);
-      checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-          &(this->blob_top_vec_));
-    }
-  }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) {
-  for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
-    for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
-      LayerParameter layer_param;
-      PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
-      pooling_param->set_kernel_h(kernel_h);
-      pooling_param->set_kernel_w(kernel_w);
-      pooling_param->set_stride(2);
-      pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-      Caffe::set_mode(Caffe::GPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-2, 1e-2);
-      checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-          &(this->blob_top_vec_));
-    }
-  }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientAvePadded) {
+TYPED_TEST(PoolingLayerTest, TestGradientAve) {
+  typedef typename TypeParam::Dtype Dtype;
   for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
     for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
       LayerParameter layer_param;
@@ -770,19 +568,17 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientAvePadded) {
       pooling_param->set_kernel_h(kernel_h);
       pooling_param->set_kernel_w(kernel_w);
       pooling_param->set_stride(2);
-      pooling_param->set_pad(2);
       pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-      Caffe::set_mode(Caffe::CPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-2, 1e-2);
+      PoolingLayer<Dtype> layer(layer_param);
+      GradientChecker<Dtype> checker(1e-2, 1e-2);
       checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
           &(this->blob_top_vec_));
     }
   }
 }
 
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientAvePadded) {
+TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) {
+  typedef typename TypeParam::Dtype Dtype;
   for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
     for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
       LayerParameter layer_param;
@@ -792,9 +588,8 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientAvePadded) {
       pooling_param->set_stride(2);
       pooling_param->set_pad(2);
       pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
-      Caffe::set_mode(Caffe::GPU);
-      PoolingLayer<TypeParam> layer(layer_param);
-      GradientChecker<TypeParam> checker(1e-2, 1e-2);
+      PoolingLayer<Dtype> layer(layer_param);
+      GradientChecker<Dtype> checker(1e-2, 1e-2);
       checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
           &(this->blob_top_vec_));
     }
index 99b127d..a1b716a 100644 (file)
@@ -20,8 +20,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class PowerLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class PowerLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   PowerLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
@@ -90,166 +92,85 @@ class PowerLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(PowerLayerTest, Dtypes);
-
-TYPED_TEST(PowerLayerTest, TestPowerCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = 0.0;
-  this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerZeroCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 0.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerZeroGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 0.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerOneCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 1.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerOneGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 1.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.34;
-  TypeParam shift = -2.4;
-  this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
-  this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.5;
-  TypeParam shift = -2.4;
-  this->TestBackward(power, scale, shift);
-}
+TYPED_TEST_CASE(PowerLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(PowerLayerTest, TestPowerGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPower) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 0.37;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestForward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 0.37;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestBackward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 0.37;
-  TypeParam scale = 0.83;
-  TypeParam shift = 0.0;
+TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 0.37;
+  Dtype scale = 0.83;
+  Dtype shift = 0.0;
   this->TestBackward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerZeroGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 0.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerZero) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 0.0;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestForward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerZeroGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 0.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 0.0;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestBackward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerOneGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 1.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerOne) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 1.0;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestForward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerOneGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 1.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerOneGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 1.0;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestBackward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerTwoGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.34;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwo) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 2.0;
+  Dtype scale = 0.34;
+  Dtype shift = -2.4;
   this->TestForward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerTwoGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.83;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 2.0;
+  Dtype scale = 0.83;
+  Dtype shift = -2.4;
   this->TestBackward(power, scale, shift);
 }
 
-TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientGPU) {
-  Caffe::set_mode(Caffe::GPU);
-  TypeParam power = 2.0;
-  TypeParam scale = 0.5;
-  TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  Dtype power = 2.0;
+  Dtype scale = 0.5;
+  Dtype shift = -2.4;
   this->TestBackward(power, scale, shift);
 }
 
index 4116de4..3ab4680 100644 (file)
@@ -202,10 +202,7 @@ class RandomNumberGeneratorTest : public ::testing::Test {
   shared_ptr<SyncedMemory> int_data_2_;
 };
 
-
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(RandomNumberGeneratorTest, Dtypes);
-
+TYPED_TEST_CASE(RandomNumberGeneratorTest, TestDtypes);
 
 TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) {
   const TypeParam mu = 0;
index 089d595..76bbfb4 100644 (file)
@@ -18,8 +18,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class SigmoidCrossEntropyLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   SigmoidCrossEntropyLossLayerTest()
       : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
@@ -95,36 +97,18 @@ class SigmoidCrossEntropyLossLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, Dtypes);
-
-
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossCPU) {
-  Caffe::set_mode(Caffe::CPU);
-  this->TestForward();
-}
+TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossGPU) {
-  Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLoss) {
   this->TestForward();
 }
 
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SigmoidCrossEntropyLossLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
-  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), 0, -1, -1);
-}
-
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientGPU) {
+TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SigmoidCrossEntropyLossLayer<TypeParam> layer(layer_param);
+  SigmoidCrossEntropyLossLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
   checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_), 0, -1, -1);
 }
index 3ba302d..4a9c0f2 100644 (file)
@@ -38,12 +38,10 @@ class SoftmaxLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SoftmaxLayerTest, Dtypes);
+TYPED_TEST_CASE(SoftmaxLayerTest, TestDtypes);
 
-TYPED_TEST(SoftmaxLayerTest, TestForwardCPU) {
+TYPED_TEST(SoftmaxLayerTest, TestForward) {
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
   SoftmaxLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -73,9 +71,8 @@ TYPED_TEST(SoftmaxLayerTest, TestForwardCPU) {
   }
 }
 
-TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) {
+TYPED_TEST(SoftmaxLayerTest, TestGradient) {
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
   SoftmaxLayer<TypeParam> layer(layer_param);
   GradientChecker<TypeParam> checker(1e-2, 1e-3);
   checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
index 8b8be8e..efd6e33 100644 (file)
@@ -19,8 +19,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class SoftmaxWithLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SoftmaxWithLossLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   SoftmaxWithLossLayerTest()
       : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
@@ -46,26 +48,15 @@ class SoftmaxWithLossLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SoftmaxWithLossLayerTest, Dtypes);
-
+TYPED_TEST_CASE(SoftmaxWithLossLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientCPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SoftmaxWithLossLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
-  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), 0, -1, -1);
-}
 
-TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) {
+TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SoftmaxWithLossLayer<TypeParam> layer(layer_param);
+  SoftmaxWithLossLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
   checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_), 0, -1, -1);
 }
index 327bcf9..455fb59 100644 (file)
@@ -20,8 +20,10 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class SplitLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SplitLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
  protected:
   SplitLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
@@ -47,12 +49,12 @@ class SplitLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SplitLayerTest, Dtypes);
+TYPED_TEST_CASE(SplitLayerTest, TestDtypesAndDevices);
 
 TYPED_TEST(SplitLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  SplitLayer<TypeParam> layer(layer_param);
+  SplitLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_a_->num(), 2);
   EXPECT_EQ(this->blob_top_a_->channels(), 3);
@@ -64,91 +66,46 @@ TYPED_TEST(SplitLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_b_->width(), 5);
 }
 
-TYPED_TEST(SplitLayerTest, TestCPU) {
+TYPED_TEST(SplitLayerTest, Test) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  SplitLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
+  SplitLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
+    Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
     EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
     EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
   }
 }
 
-TYPED_TEST(SplitLayerTest, TestGPU) {
-  LayerParameter layer_param;
-  SplitLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
-    EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
-    EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
-  }
-}
-
-TYPED_TEST(SplitLayerTest, TestCPUInPlace) {
-  LayerParameter layer_param;
-  SplitLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::CPU);
-  this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
-    EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
-  }
-}
-
-TYPED_TEST(SplitLayerTest, TestGPUInPlace) {
+TYPED_TEST(SplitLayerTest, TestInPlace) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  SplitLayer<TypeParam> layer(layer_param);
-  Caffe::set_mode(Caffe::GPU);
+  SplitLayer<Dtype> layer(layer_param);
   this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
+    Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
     EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
   }
 }
 
-TYPED_TEST(SplitLayerTest, TestCPUGradient) {
+TYPED_TEST(SplitLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SplitLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(SplitLayerTest, TestGPUGradient) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SplitLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(SplitLayerTest, TestCPUGradientInPlace) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  SplitLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
-  this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
+  SplitLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
 
-TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) {
+TYPED_TEST(SplitLayerTest, TestGradientInPlace) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  SplitLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  SplitLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-2);
   this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
index 0ad8123..7a931d2 100644 (file)
@@ -49,8 +49,7 @@ class StochasticPoolingLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(StochasticPoolingLayerTest, Dtypes);
+TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes);
 
 TYPED_TEST(StochasticPoolingLayerTest, TestSetup) {
   LayerParameter layer_param;
index 3a75708..20bd861 100644 (file)
@@ -24,7 +24,7 @@ TEST_F(SyncedMemoryTest, TestInitialization) {
   delete p_mem;
 }
 
-TEST_F(SyncedMemoryTest, TestAllocation) {
+TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) {
   SyncedMemory mem(10);
   EXPECT_TRUE(mem.cpu_data());
   EXPECT_TRUE(mem.gpu_data());
@@ -32,20 +32,32 @@ TEST_F(SyncedMemoryTest, TestAllocation) {
   EXPECT_TRUE(mem.mutable_gpu_data());
 }
 
+TEST_F(SyncedMemoryTest, TestAllocationCPU) {
+  SyncedMemory mem(10);
+  EXPECT_TRUE(mem.cpu_data());
+  EXPECT_TRUE(mem.mutable_cpu_data());
+}
+
+TEST_F(SyncedMemoryTest, TestAllocationGPU) {
+  SyncedMemory mem(10);
+  EXPECT_TRUE(mem.gpu_data());
+  EXPECT_TRUE(mem.mutable_gpu_data());
+}
+
 TEST_F(SyncedMemoryTest, TestCPUWrite) {
   SyncedMemory mem(10);
   void* cpu_data = mem.mutable_cpu_data();
   EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
   memset(cpu_data, 1, mem.size());
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 1);
+    EXPECT_EQ((static_cast<char*>(cpu_data))[i], 1);
   }
   // do another round
   cpu_data = mem.mutable_cpu_data();
   EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
   memset(cpu_data, 2, mem.size());
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 2);
+    EXPECT_EQ((static_cast<char*>(cpu_data))[i], 2);
   }
 }
 
@@ -60,21 +72,21 @@ TEST_F(SyncedMemoryTest, TestGPURead) {
   char* recovered_value = new char[10];
   caffe_gpu_memcpy(10, gpu_data, recovered_value);
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 1);
+    EXPECT_EQ((static_cast<char*>(recovered_value))[i], 1);
   }
   // do another round
   cpu_data = mem.mutable_cpu_data();
   EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
   memset(cpu_data, 2, mem.size());
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 2);
+    EXPECT_EQ((static_cast<char*>(cpu_data))[i], 2);
   }
   gpu_data = mem.gpu_data();
   EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
   // check if values are the same
   caffe_gpu_memcpy(10, gpu_data, recovered_value);
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 2);
+    EXPECT_EQ((static_cast<char*>(recovered_value))[i], 2);
   }
   delete[] recovered_value;
 }
@@ -86,7 +98,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) {
   CUDA_CHECK(cudaMemset(gpu_data, 1, mem.size()));
   const void* cpu_data = mem.cpu_data();
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 1);
+    EXPECT_EQ((static_cast<const char*>(cpu_data))[i], 1);
   }
   EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
 
@@ -95,7 +107,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) {
   CUDA_CHECK(cudaMemset(gpu_data, 2, mem.size()));
   cpu_data = mem.cpu_data();
   for (int i = 0; i < mem.size(); ++i) {
-    EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 2);
+    EXPECT_EQ((static_cast<const char*>(cpu_data))[i], 2);
   }
   EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
 }
index 9c9f8a7..171eb4e 100644 (file)
@@ -19,8 +19,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class TanHLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class TanHLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   TanHLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 10, 1, 1)),
@@ -39,13 +40,12 @@ class TanHLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(TanHLayerTest, Dtypes);
+TYPED_TEST_CASE(TanHLayerTest, TestDtypesAndDevices);
 
-TYPED_TEST(TanHLayerTest, TestForwardCPU) {
+TYPED_TEST(TanHLayerTest, TestForward) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  TanHLayer<TypeParam> layer(layer_param);
+  TanHLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Test exact values
@@ -65,43 +65,11 @@ TYPED_TEST(TanHLayerTest, TestForwardCPU) {
   }
 }
 
-TYPED_TEST(TanHLayerTest, TestGradientCPU) {
+TYPED_TEST(TanHLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  TanHLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
-}
-
-TYPED_TEST(TanHLayerTest, TestForwardGPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  TanHLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Test exact values
-  for (int i = 0; i < this->blob_bottom_->num(); ++i) {
-    for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
-      for (int k = 0; k < this->blob_bottom_->height(); ++k) {
-        for (int l = 0; l < this->blob_bottom_->width(); ++l) {
-          EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
-             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
-             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
-          EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
-             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
-             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
-        }
-      }
-    }
-  }
-}
-
-TYPED_TEST(TanHLayerTest, TestGradientGPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  TanHLayer<TypeParam> layer(layer_param);
-  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  TanHLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-2, 1e-3);
   checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
       &(this->blob_top_vec_));
 }
index 8303e44..46519ff 100644 (file)
@@ -16,8 +16,9 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-template <typename Dtype>
-class ThresholdLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ThresholdLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
  protected:
   ThresholdLayerTest()
       : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
@@ -37,13 +38,13 @@ class ThresholdLayerTest : public ::testing::Test {
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ThresholdLayerTest, Dtypes);
+TYPED_TEST_CASE(ThresholdLayerTest, TestDtypesAndDevices);
 
 
 TYPED_TEST(ThresholdLayerTest, TestSetup) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  ThresholdLayer<TypeParam> layer(layer_param);
+  ThresholdLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
@@ -51,16 +52,16 @@ TYPED_TEST(ThresholdLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_->width());
 }
 
-TYPED_TEST(ThresholdLayerTest, TestCPU) {
+TYPED_TEST(ThresholdLayerTest, Test) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
-  ThresholdLayer<TypeParam> layer(layer_param);
+  ThresholdLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  const TypeParam threshold_ = layer_param.threshold_param().threshold();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
+  const Dtype threshold_ = layer_param.threshold_param().threshold();
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     EXPECT_GE(top_data[i], 0.);
     EXPECT_LE(top_data[i], 1.);
@@ -73,67 +74,19 @@ TYPED_TEST(ThresholdLayerTest, TestCPU) {
   }
 }
 
-TYPED_TEST(ThresholdLayerTest, TestCPU2) {
+TYPED_TEST(ThresholdLayerTest, Test2) {
+  typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
-  Caffe::set_mode(Caffe::CPU);
   ThresholdParameter* threshold_param =
     layer_param.mutable_threshold_param();
   threshold_param->set_threshold(0.5);
-  ThresholdLayer<TypeParam> layer(layer_param);
+  ThresholdLayer<Dtype> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  const TypeParam threshold_ = layer_param.threshold_param().threshold();
-  EXPECT_FLOAT_EQ(threshold_, 0.5);
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_GE(top_data[i], 0.);
-    EXPECT_LE(top_data[i], 1.);
-    if (top_data[i] == 0) {
-      EXPECT_LE(bottom_data[i], threshold_);
-    }
-    if (top_data[i] == 1) {
-      EXPECT_GT(bottom_data[i], threshold_);
-    }
-  }
-}
-
-TYPED_TEST(ThresholdLayerTest, TestGPU) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  ThresholdLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  const TypeParam threshold_ = layer_param.threshold_param().threshold();
-  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
-    EXPECT_GE(top_data[i], 0.);
-    EXPECT_LE(top_data[i], 1.);
-    if (top_data[i] == 0) {
-      EXPECT_LE(bottom_data[i], threshold_);
-    }
-    if (top_data[i] == 1) {
-      EXPECT_GT(bottom_data[i], threshold_);
-    }
-  }
-}
-
-TYPED_TEST(ThresholdLayerTest, TestGPU2) {
-  LayerParameter layer_param;
-  Caffe::set_mode(Caffe::GPU);
-  ThresholdParameter* threshold_param =
-    layer_param.mutable_threshold_param();
-  threshold_param->set_threshold(0.5);
-  ThresholdLayer<TypeParam> layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  // Now, check values
-  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
-  const TypeParam* top_data = this->blob_top_->cpu_data();
-  const TypeParam threshold_ = layer_param.threshold_param().threshold();
+  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+  const Dtype* top_data = this->blob_top_->cpu_data();
+  const Dtype threshold_ = layer_param.threshold_param().threshold();
   EXPECT_FLOAT_EQ(threshold_, 0.5);
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     EXPECT_GE(top_data[i], 0.);
index 5b4c48e..725d24e 100644 (file)
@@ -15,14 +15,12 @@ namespace caffe {
 
 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
 
-typedef ::testing::Types<float, double> Dtypes;
-
-template <typename Dtype>
+template <typename TypeParam>
 class GemmTest : public ::testing::Test {};
 
-TYPED_TEST_CASE(GemmTest, Dtypes);
+TYPED_TEST_CASE(GemmTest, TestDtypes);
 
-TYPED_TEST(GemmTest, TestGemm) {
+TYPED_TEST(GemmTest, TestGemmCPUGPU) {
   Blob<TypeParam> A(1, 1, 2, 3);
   Blob<TypeParam> B(1, 1, 3, 4);
   Blob<TypeParam> C(1, 1, 2, 4);
@@ -93,7 +91,7 @@ TYPED_TEST(GemmTest, TestGemm) {
 }
 
 
-TYPED_TEST(GemmTest, TestGemv) {
+TYPED_TEST(GemmTest, TestGemvCPUGPU) {
   Blob<TypeParam> A(1, 1, 2, 3);
   Blob<TypeParam> x(1, 1, 1, 3);
   Blob<TypeParam> y(1, 1, 1, 2);