# CXX_SRCS are the source files excluding the test ones.
CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")
# HXX_SRCS are the header files
-HXX_SRCS := $(shell find include/$(PROJECT) -name "*.hpp")
+HXX_SRCS := $(shell find include/$(PROJECT) ! -name "test_*.hpp" -name "*.hpp")
# CU_SRCS are the cuda source files
CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")
# TEST_SRCS are the test source files
TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")
GTEST_SRC := src/gtest/gtest-all.cpp
# TEST_HDRS are the test header files
-TEST_HDRS := $(shell find src/$(PROJECT) -name "test_*.hpp")
+TEST_HDRS := $(shell find include/$(PROJECT) -name "test_*.hpp")
# TOOL_SRCS are the source files for the tool binaries
TOOL_SRCS := $(shell find tools -name "*.cpp")
# EXAMPLE_SRCS are the source files for the example binaries
# Define build targets
##############################
.PHONY: all test clean linecount lint tools examples $(DIST_ALIASES) \
- py mat py$(PROJECT) mat$(PROJECT) proto runtest \
+ py mat py$(PROJECT) mat$(PROJECT) proto runtest runtestnogpu \
superclean supercleanlist supercleanfiles warn
all: $(NAME) $(STATIC_NAME) tools examples
runtest: $(TEST_ALL_BIN)
$(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle
+runtestnogpu: $(TEST_ALL_BIN)
+ $(TEST_ALL_BIN) --gtest_shuffle --gtest_filter="-*GPU*:*/2.*:*/3.*"
+
warn: $(EMPTY_WARN_REPORT)
$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)
--- /dev/null
+// Copyright 2014 BVLC and contributors.
+
+// The main caffe test code. Your test cpp code should include this hpp
+// to allow a main function to be compiled into the binary.
+#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
+#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
+
+#include <cuda_runtime.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+#include <cstdio>
+
+#include "caffe/common.hpp"
+
+using std::cout;
+using std::endl;
+
+int main(int argc, char** argv);
+
+namespace caffe {
+
+template <typename TypeParam>
+class MultiDeviceTest : public ::testing::Test {
+ public:
+ typedef typename TypeParam::Dtype Dtype;
+ protected:
+ MultiDeviceTest() {
+ Caffe::set_mode(TypeParam::device);
+ }
+ virtual ~MultiDeviceTest() {}
+};
+
+typedef ::testing::Types<float, double> TestDtypes;
+
+struct FloatCPU {
+ typedef float Dtype;
+ static const Caffe::Brew device = Caffe::CPU;
+};
+
+struct DoubleCPU {
+ typedef double Dtype;
+ static const Caffe::Brew device = Caffe::CPU;
+};
+
+struct FloatGPU {
+ typedef float Dtype;
+ static const Caffe::Brew device = Caffe::GPU;
+};
+
+struct DoubleGPU {
+ typedef double Dtype;
+ static const Caffe::Brew device = Caffe::GPU;
+};
+
+typedef ::testing::Types<FloatCPU, DoubleCPU, FloatGPU, DoubleGPU>
+ TestDtypesAndDevices;
+
+} // namespace caffe
+
+#endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
int top_k_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(AccuracyLayerTest, Dtypes);
+TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes);
TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
LayerParameter layer_param;
AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param();
accuracy_param->set_top_k(this->top_k_);
- Caffe::set_mode(Caffe::CPU);
AccuracyLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ArgMaxLayerTest, Dtypes);
-
+TYPED_TEST_CASE(ArgMaxLayerTest, TestDtypes);
TYPED_TEST(ArgMaxLayerTest, TestSetup) {
LayerParameter layer_param;
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-class BenchmarkTest : public ::testing::Test {};
+template <typename TypeParam>
+class BenchmarkTest : public MultiDeviceTest<TypeParam> {};
-TEST_F(BenchmarkTest, TestTimerConstructorCPU) {
- Caffe::set_mode(Caffe::CPU);
- Timer timer;
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_FALSE(timer.has_run_at_least_once());
-}
+TYPED_TEST_CASE(BenchmarkTest, TestDtypesAndDevices);
-TEST_F(BenchmarkTest, TestTimerConstructorGPU) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(BenchmarkTest, TestTimerConstructor) {
Timer timer;
EXPECT_TRUE(timer.initted());
EXPECT_FALSE(timer.running());
EXPECT_FALSE(timer.has_run_at_least_once());
}
-TEST_F(BenchmarkTest, TestTimerStartCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerStart) {
Timer timer;
timer.Start();
EXPECT_TRUE(timer.initted());
EXPECT_TRUE(timer.has_run_at_least_once());
}
-TEST_F(BenchmarkTest, TestTimerStartGPU) {
- Caffe::set_mode(Caffe::GPU);
- Timer timer;
- timer.Start();
- EXPECT_TRUE(timer.initted());
- EXPECT_TRUE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
- timer.Stop();
- timer.Start();
- EXPECT_TRUE(timer.initted());
- EXPECT_TRUE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
- timer.Start();
- EXPECT_TRUE(timer.initted());
- EXPECT_TRUE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerStopCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerStop) {
Timer timer;
timer.Stop();
EXPECT_TRUE(timer.initted());
EXPECT_TRUE(timer.has_run_at_least_once());
}
-TEST_F(BenchmarkTest, TestTimerStopGPU) {
- Caffe::set_mode(Caffe::GPU);
- Timer timer;
- timer.Stop();
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_FALSE(timer.has_run_at_least_once());
- timer.Start();
- timer.Stop();
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
- timer.Stop();
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerMilliSecondsCPU) {
- Caffe::set_mode(Caffe::CPU);
- Timer timer;
- CHECK_EQ(timer.MilliSeconds(), 0);
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_FALSE(timer.has_run_at_least_once());
- timer.Start();
- usleep(300 * 1000);
- CHECK_GE(timer.MilliSeconds(), 298);
- CHECK_LE(timer.MilliSeconds(), 302);
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerMilliSecondsGPU) {
- Caffe::set_mode(Caffe::GPU);
- Timer timer;
- CHECK_EQ(timer.MilliSeconds(), 0);
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_FALSE(timer.has_run_at_least_once());
- timer.Start();
- usleep(300 * 1000);
- CHECK_GE(timer.MilliSeconds(), 298);
- CHECK_LE(timer.MilliSeconds(), 302);
- EXPECT_TRUE(timer.initted());
- EXPECT_FALSE(timer.running());
- EXPECT_TRUE(timer.has_run_at_least_once());
-}
-
-TEST_F(BenchmarkTest, TestTimerSecondsCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) {
Timer timer;
- CHECK_EQ(timer.Seconds(), 0);
+ EXPECT_EQ(timer.MilliSeconds(), 0);
EXPECT_TRUE(timer.initted());
EXPECT_FALSE(timer.running());
EXPECT_FALSE(timer.has_run_at_least_once());
timer.Start();
usleep(300 * 1000);
- CHECK_GE(timer.Seconds(), 0.298);
- CHECK_LE(timer.Seconds(), 0.302);
+ EXPECT_GE(timer.MilliSeconds(), 298);
+ EXPECT_LE(timer.MilliSeconds(), 302);
EXPECT_TRUE(timer.initted());
EXPECT_FALSE(timer.running());
EXPECT_TRUE(timer.has_run_at_least_once());
}
-TEST_F(BenchmarkTest, TestTimerSecondsGPU) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(BenchmarkTest, TestTimerSeconds) {
Timer timer;
- CHECK_EQ(timer.Seconds(), 0);
+ EXPECT_EQ(timer.Seconds(), 0);
EXPECT_TRUE(timer.initted());
EXPECT_FALSE(timer.running());
EXPECT_FALSE(timer.has_run_at_least_once());
timer.Start();
usleep(300 * 1000);
- CHECK_GE(timer.Seconds(), 0.298);
- CHECK_LE(timer.Seconds(), 0.302);
+ EXPECT_GE(timer.Seconds(), 0.298);
+ EXPECT_LE(timer.Seconds(), 0.302);
EXPECT_TRUE(timer.initted());
EXPECT_FALSE(timer.running());
EXPECT_TRUE(timer.has_run_at_least_once());
Blob<Dtype>* const blob_preshaped_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(BlobSimpleTest, Dtypes);
+TYPED_TEST_CASE(BlobSimpleTest, TestDtypes);
TYPED_TEST(BlobSimpleTest, TestInitialization) {
EXPECT_TRUE(this->blob_);
EXPECT_EQ(this->blob_->count(), 0);
}
-TYPED_TEST(BlobSimpleTest, TestPointers) {
+TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
EXPECT_TRUE(this->blob_preshaped_->gpu_data());
EXPECT_TRUE(this->blob_preshaped_->cpu_data());
EXPECT_TRUE(this->blob_preshaped_->mutable_gpu_data());
// The main caffe test code. Your test cpp code should include this hpp
// to allow a main function to be compiled into the binary.
-#include "test_caffe_main.hpp"
+#include "caffe/test/test_caffe_main.hpp"
namespace caffe {
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+++ /dev/null
-// Copyright 2014 BVLC and contributors.
-
-// The main caffe test code. Your test cpp code should include this hpp
-// to allow a main function to be compiled into the binary.
-#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
-#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
-
-#include <cuda_runtime.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include <cstdlib>
-#include <cstdio>
-
-using std::cout;
-using std::endl;
-
-int main(int argc, char** argv);
-
-#endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
class CommonTest : public ::testing::Test {};
-TEST_F(CommonTest, TestCublasHandler) {
+TEST_F(CommonTest, TestCublasHandlerGPU) {
int cuda_device_id;
CUDA_CHECK(cudaGetDevice(&cuda_device_id));
EXPECT_TRUE(Caffe::cublas_handle());
SyncedMemory data_b(10 * sizeof(unsigned int));
Caffe::set_random_seed(1701);
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
- reinterpret_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
+ static_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
Caffe::set_random_seed(1701);
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
- reinterpret_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
+ static_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
((const unsigned int*)(data_b.cpu_data()))[i]);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class ConcatLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ConcatLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
ConcatLayerTest()
: blob_bottom_0(new Blob<Dtype>(2, 3, 6, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ConcatLayerTest, Dtypes);
+TYPED_TEST_CASE(ConcatLayerTest, TestDtypesAndDevices);
TYPED_TEST(ConcatLayerTest, TestSetupNum) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
layer_param.mutable_concat_param()->set_concat_dim(0);
- ConcatLayer<TypeParam> layer(layer_param);
+ ConcatLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(),
this->blob_bottom_0->num() + this->blob_bottom_2->num());
}
TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- ConcatLayer<TypeParam> layer(layer_param);
+ ConcatLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num());
EXPECT_EQ(this->blob_top_->channels(),
}
-TYPED_TEST(ConcatLayerTest, TestCPUNum) {
+TYPED_TEST(ConcatLayerTest, TestNum) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- ConcatLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
+ ConcatLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_));
for (int n = 0; n < this->blob_top_->num(); ++n) {
}
}
-
-TYPED_TEST(ConcatLayerTest, TestCPUGradient) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- ConcatLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConcatLayerTest, TestGPUGradient) {
+TYPED_TEST(ConcatLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- ConcatLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ ConcatLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class ConvolutionLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ConvolutionLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
ConvolutionLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 4)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ConvolutionLayerTest, Dtypes);
+TYPED_TEST_CASE(ConvolutionLayerTest, TestDtypesAndDevices);
TYPED_TEST(ConvolutionLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_num_output(4);
this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
this->blob_top_vec_.push_back(this->blob_top_2_);
- shared_ptr<Layer<TypeParam> > layer(
- new ConvolutionLayer<TypeParam>(layer_param));
+ shared_ptr<Layer<Dtype> > layer(
+ new ConvolutionLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 4);
// setting group should not change the shape
convolution_param->set_num_output(3);
convolution_param->set_group(3);
- layer.reset(new ConvolutionLayer<TypeParam>(layer_param));
+ layer.reset(new ConvolutionLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3);
EXPECT_EQ(this->blob_top_2_->width(), 1);
}
-TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) {
+TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
// We will simply see if the convolution layer carries out averaging well.
- shared_ptr<ConstantFiller<TypeParam> > filler;
+ typedef typename TypeParam::Dtype Dtype;
+ shared_ptr<ConstantFiller<Dtype> > filler;
FillerParameter filler_param;
filler_param.set_value(1.);
- filler.reset(new ConstantFiller<TypeParam>(filler_param));
+ filler.reset(new ConstantFiller<Dtype>(filler_param));
filler->Fill(this->blob_bottom_);
filler_param.set_value(2.);
- filler.reset(new ConstantFiller<TypeParam>(filler_param));
+ filler.reset(new ConstantFiller<Dtype>(filler_param));
filler->Fill(this->blob_bottom_2_);
this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
this->blob_top_vec_.push_back(this->blob_top_2_);
convolution_param->mutable_weight_filler()->set_value(1);
convolution_param->mutable_bias_filler()->set_type("constant");
convolution_param->mutable_bias_filler()->set_value(0.1);
- shared_ptr<Layer<TypeParam> > layer(
- new ConvolutionLayer<TypeParam>(layer_param));
+ shared_ptr<Layer<Dtype> > layer(
+ new ConvolutionLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Caffe::set_mode(Caffe::CPU);
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// After the convolution, the output should all have output values 27.1
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_top_->count(); ++i) {
EXPECT_NEAR(top_data[i], 27.1, 1e-4);
}
}
}
-TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolution) {
- // We will simply see if the convolution layer carries out averaging well.
- shared_ptr<ConstantFiller<TypeParam> > filler;
- FillerParameter filler_param;
- filler_param.set_value(1.);
- filler.reset(new ConstantFiller<TypeParam>(filler_param));
- filler->Fill(this->blob_bottom_);
- filler_param.set_value(2.);
- filler.reset(new ConstantFiller<TypeParam>(filler_param));
- filler->Fill(this->blob_bottom_2_);
- this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
- this->blob_top_vec_.push_back(this->blob_top_2_);
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- convolution_param->set_num_output(4);
- convolution_param->mutable_weight_filler()->set_type("constant");
- convolution_param->mutable_weight_filler()->set_value(1);
- convolution_param->mutable_bias_filler()->set_type("constant");
- convolution_param->mutable_bias_filler()->set_value(0.1);
- shared_ptr<Layer<TypeParam> > layer(
- new ConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Caffe::set_mode(Caffe::GPU);
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // After the convolution, the output should all have output values 27.1
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- EXPECT_NEAR(top_data[i], 27.1, 1e-4);
- }
- top_data = this->blob_top_2_->cpu_data();
- for (int i = 0; i < this->blob_top_2_->count(); ++i) {
- EXPECT_NEAR(top_data[i], 54.1, 1e-4);
- }
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolutionGroup) {
- // We will simply see if the convolution layer carries out averaging well.
- FillerParameter filler_param;
- filler_param.set_value(1.);
- ConstantFiller<TypeParam> filler(filler_param);
- filler.Fill(this->blob_bottom_);
- TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
- for (int n = 0; n < this->blob_bottom_->num(); ++n) {
- for (int c = 0; c < this->blob_bottom_->channels(); ++c) {
- for (int h = 0; h < this->blob_bottom_->height(); ++h) {
- for (int w = 0; w < this->blob_bottom_->width(); ++w) {
- bottom_data[this->blob_bottom_->offset(n, c, h, w)] = c;
- }
- }
- }
- }
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- convolution_param->set_num_output(3);
- convolution_param->set_group(3);
- convolution_param->mutable_weight_filler()->set_type("constant");
- convolution_param->mutable_weight_filler()->set_value(1);
- convolution_param->mutable_bias_filler()->set_type("constant");
- convolution_param->mutable_bias_filler()->set_value(0.1);
- shared_ptr<Layer<TypeParam> > layer(
- new ConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Caffe::set_mode(Caffe::CPU);
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // After the convolution, the output should all have output values 9.1
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int n = 0; n < this->blob_top_->num(); ++n) {
- for (int c = 0; c < this->blob_top_->channels(); ++c) {
- for (int h = 0; h < this->blob_top_->height(); ++h) {
- for (int w = 0; w < this->blob_top_->width(); ++w) {
- TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
- EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
- }
- }
- }
- }
-}
-
-
-TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) {
+TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
// We will simply see if the convolution layer carries out averaging well.
+ typedef typename TypeParam::Dtype Dtype;
FillerParameter filler_param;
filler_param.set_value(1.);
- ConstantFiller<TypeParam> filler(filler_param);
+ ConstantFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
- TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+ Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data();
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
for (int c = 0; c < this->blob_bottom_->channels(); ++c) {
for (int h = 0; h < this->blob_bottom_->height(); ++h) {
convolution_param->mutable_weight_filler()->set_value(1);
convolution_param->mutable_bias_filler()->set_type("constant");
convolution_param->mutable_bias_filler()->set_value(0.1);
- shared_ptr<Layer<TypeParam> > layer(
- new ConvolutionLayer<TypeParam>(layer_param));
+ shared_ptr<Layer<Dtype> > layer(
+ new ConvolutionLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Caffe::set_mode(Caffe::GPU);
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// After the convolution, the output should all have output values 9.1
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int n = 0; n < this->blob_top_->num(); ++n) {
for (int c = 0; c < this->blob_top_->channels(); ++c) {
for (int h = 0; h < this->blob_top_->height(); ++h) {
for (int w = 0; w < this->blob_top_->width(); ++w) {
- TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
+ Dtype data = top_data[this->blob_top_->offset(n, c, h, w)];
EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
}
}
}
}
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
- this->blob_top_vec_.push_back(this->blob_top_2_);
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- convolution_param->set_num_output(2);
- convolution_param->mutable_weight_filler()->set_type("gaussian");
- convolution_param->mutable_bias_filler()->set_type("gaussian");
- Caffe::set_mode(Caffe::CPU);
- ConvolutionLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- convolution_param->set_num_output(3);
- convolution_param->set_group(3);
- convolution_param->mutable_weight_filler()->set_type("gaussian");
- convolution_param->mutable_bias_filler()->set_type("gaussian");
- Caffe::set_mode(Caffe::CPU);
- ConvolutionLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
+TYPED_TEST(ConvolutionLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_num_output(2);
convolution_param->mutable_weight_filler()->set_type("gaussian");
convolution_param->mutable_bias_filler()->set_type("gaussian");
- Caffe::set_mode(Caffe::GPU);
- ConvolutionLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ ConvolutionLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
+TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_group(3);
convolution_param->mutable_weight_filler()->set_type("gaussian");
convolution_param->mutable_bias_filler()->set_type("gaussian");
- Caffe::set_mode(Caffe::GPU);
- ConvolutionLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ ConvolutionLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class DataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class DataLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
DataLayerTest()
: backend_(DataParameter_DB_LEVELDB),
int seed_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(DataLayerTest, Dtypes);
+TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices);
-TYPED_TEST(DataLayerTest, TestReadLevelDBCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(DataLayerTest, TestReadLevelDB) {
const bool unique_pixels = false; // all pixels the same; images different
this->FillLevelDB(unique_pixels);
this->TestRead();
}
-TYPED_TEST(DataLayerTest, TestReadLevelDBGPU) {
- Caffe::set_mode(Caffe::GPU);
- const bool unique_pixels = false; // all pixels the same; images different
- this->FillLevelDB(unique_pixels);
- this->TestRead();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLevelDB(unique_pixels);
this->TestReadCrop();
}
-TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBGPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLevelDB(unique_pixels);
- this->TestReadCrop();
-}
-
-// Test that the sequence of random crops is consistent when using
-// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBCPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLevelDB(unique_pixels);
- this->TestReadCropTrainSequenceSeeded();
-}
-
// Test that the sequence of random crops is consistent when using
// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLevelDB(unique_pixels);
this->TestReadCropTrainSequenceSeeded();
// Test that the sequence of random crops differs across iterations when
// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLevelDB(unique_pixels);
this->TestReadCropTrainSequenceUnseeded();
}
-// Test that the sequence of random crops differs across iterations when
-// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBGPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLevelDB(unique_pixels);
- this->TestReadCropTrainSequenceUnseeded();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBCPU) {
- Caffe::set_phase(Caffe::TEST);
- Caffe::set_mode(Caffe::CPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLevelDB(unique_pixels);
- this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) {
Caffe::set_phase(Caffe::TEST);
- Caffe::set_mode(Caffe::GPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLevelDB(unique_pixels);
this->TestReadCrop();
}
-TYPED_TEST(DataLayerTest, TestReadLMDBCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(DataLayerTest, TestReadLMDB) {
const bool unique_pixels = false; // all pixels the same; images different
this->FillLMDB(unique_pixels);
this->TestRead();
}
-TYPED_TEST(DataLayerTest, TestReadLMDBGPU) {
- Caffe::set_mode(Caffe::GPU);
- const bool unique_pixels = false; // all pixels the same; images different
- this->FillLMDB(unique_pixels);
- this->TestRead();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLMDB(unique_pixels);
- this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBGPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLMDB(unique_pixels);
this->TestReadCrop();
// Test that the sequence of random crops is consistent when using
// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBCPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLMDB(unique_pixels);
this->TestReadCropTrainSequenceSeeded();
}
-// Test that the sequence of random crops is consistent when using
-// Caffe::set_random_seed.
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBGPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLMDB(unique_pixels);
- this->TestReadCropTrainSequenceSeeded();
-}
-
-// Test that the sequence of random crops differs across iterations when
-// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBCPU) {
- Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::CPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLMDB(unique_pixels);
- this->TestReadCropTrainSequenceUnseeded();
-}
-
// Test that the sequence of random crops differs across iterations when
// Caffe::set_random_seed isn't called (and seeds from srand are ignored).
-TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) {
Caffe::set_phase(Caffe::TRAIN);
- Caffe::set_mode(Caffe::GPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLMDB(unique_pixels);
this->TestReadCropTrainSequenceUnseeded();
}
-TYPED_TEST(DataLayerTest, TestReadCropTestLMDBCPU) {
- Caffe::set_phase(Caffe::TEST);
- Caffe::set_mode(Caffe::CPU);
- const bool unique_pixels = true; // all images the same; pixels different
- this->FillLMDB(unique_pixels);
- this->TestReadCrop();
-}
-
-TYPED_TEST(DataLayerTest, TestReadCropTestLMDBGPU) {
+TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) {
Caffe::set_phase(Caffe::TEST);
- Caffe::set_mode(Caffe::GPU);
const bool unique_pixels = true; // all images the same; pixels different
this->FillLMDB(unique_pixels);
this->TestReadCrop();
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(DummyDataLayerTest, Dtypes);
+TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes);
TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
Caffe::set_mode(Caffe::CPU);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class EltwiseLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class EltwiseLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
EltwiseLayerTest()
: blob_bottom_a_(new Blob<Dtype>(2, 3, 4, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(EltwiseLayerTest, Dtypes);
+TYPED_TEST_CASE(EltwiseLayerTest, TestDtypesAndDevices);
TYPED_TEST(EltwiseLayerTest, TestSetUp) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
+ shared_ptr<EltwiseLayer<Dtype> > layer(
+ new EltwiseLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3);
EXPECT_EQ(this->blob_top_->width(), 5);
}
-TYPED_TEST(EltwiseLayerTest, TestProdCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestProd) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
+ shared_ptr<EltwiseLayer<Dtype> > layer(
+ new EltwiseLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
+ const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+ const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+ const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+ const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
for (int i = 0; i < count; ++i) {
EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]);
}
}
-TYPED_TEST(EltwiseLayerTest, TestSumCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestSum) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
+ shared_ptr<EltwiseLayer<Dtype> > layer(
+ new EltwiseLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
+ const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+ const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+ const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+ const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
for (int i = 0; i < count; ++i) {
EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]);
}
}
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EltwiseLayerTest, TestSumCoeff) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
eltwise_param->add_coeff(1);
eltwise_param->add_coeff(-0.5);
eltwise_param->add_coeff(2);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
+ shared_ptr<EltwiseLayer<Dtype> > layer(
+ new EltwiseLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
+ const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
+ const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
+ const Dtype* in_data_b = this->blob_bottom_b_->cpu_data();
+ const Dtype* in_data_c = this->blob_bottom_c_->cpu_data();
for (int i = 0; i < count; ++i) {
EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i],
1e-4);
}
}
-TYPED_TEST(EltwiseLayerTest, TestProdGPU) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestProdGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
- const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]);
- }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumGPU) {
- Caffe::set_mode(Caffe::GPU);
- LayerParameter layer_param;
- EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
- eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
- const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]);
- }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPU) {
- Caffe::set_mode(Caffe::GPU);
- LayerParameter layer_param;
- EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
- eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- eltwise_param->add_coeff(1);
- eltwise_param->add_coeff(-0.5);
- eltwise_param->add_coeff(2);
- shared_ptr<EltwiseLayer<TypeParam> > layer(
- new EltwiseLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
- const int count = this->blob_top_->count();
- const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data();
- const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data();
- const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i],
- 1e-4);
- }
-}
-
-TYPED_TEST(EltwiseLayerTest, TestProdCPUGradient) {
- Caffe::set_mode(Caffe::CPU);
- LayerParameter layer_param;
- EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
- eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
- EltwiseLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCPUGradient) {
- Caffe::set_mode(Caffe::CPU);
- LayerParameter layer_param;
- EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
- eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- EltwiseLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPUGradient) {
- Caffe::set_mode(Caffe::CPU);
- LayerParameter layer_param;
- EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
- eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- eltwise_param->add_coeff(1);
- eltwise_param->add_coeff(-0.5);
- eltwise_param->add_coeff(2);
- EltwiseLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ EltwiseLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-TYPED_TEST(EltwiseLayerTest, TestSumGPUGradient) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
- EltwiseLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ EltwiseLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPUGradient) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
eltwise_param->add_coeff(1);
eltwise_param->add_coeff(-0.5);
eltwise_param->add_coeff(2);
- EltwiseLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ EltwiseLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class EuclideanLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class EuclideanLossLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
EuclideanLossLayerTest()
: blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(EuclideanLossLayerTest, Dtypes);
+TYPED_TEST_CASE(EuclideanLossLayerTest, TestDtypesAndDevices);
-TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(EuclideanLossLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- EuclideanLossLayer<TypeParam> layer(layer_param);
+ EuclideanLossLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+ GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_), -1, -1, -1);
}
-TYPED_TEST(EuclideanLossLayerTest, TestGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- LayerParameter layer_param;
- EuclideanLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), -1, -1, -1);
-}
} // namespace caffe
namespace caffe {
-typedef ::testing::Types<float, double> Dtypes;
-
template <typename Dtype>
class ConstantFillerTest : public ::testing::Test {
protected:
shared_ptr<ConstantFiller<Dtype> > filler_;
};
-TYPED_TEST_CASE(ConstantFillerTest, Dtypes);
+TYPED_TEST_CASE(ConstantFillerTest, TestDtypes);
TYPED_TEST(ConstantFillerTest, TestFill) {
EXPECT_TRUE(this->blob_);
shared_ptr<UniformFiller<Dtype> > filler_;
};
-TYPED_TEST_CASE(UniformFillerTest, Dtypes);
+TYPED_TEST_CASE(UniformFillerTest, TestDtypes);
TYPED_TEST(UniformFillerTest, TestFill) {
EXPECT_TRUE(this->blob_);
shared_ptr<PositiveUnitballFiller<Dtype> > filler_;
};
-TYPED_TEST_CASE(PositiveUnitballFillerTest, Dtypes);
+TYPED_TEST_CASE(PositiveUnitballFillerTest, TestDtypes);
TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
EXPECT_TRUE(this->blob_);
shared_ptr<GaussianFiller<Dtype> > filler_;
};
-TYPED_TEST_CASE(GaussianFillerTest, Dtypes);
+TYPED_TEST_CASE(GaussianFillerTest, TestDtypes);
TYPED_TEST(GaussianFillerTest, TestFill) {
EXPECT_TRUE(this->blob_);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class FlattenLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class FlattenLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
FlattenLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(FlattenLayerTest, Dtypes);
+TYPED_TEST_CASE(FlattenLayerTest, TestDtypesAndDevices);
TYPED_TEST(FlattenLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- FlattenLayer<TypeParam> layer(layer_param);
+ FlattenLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5);
EXPECT_EQ(this->blob_top_->width(), 1);
}
-TYPED_TEST(FlattenLayerTest, TestCPU) {
+TYPED_TEST(FlattenLayerTest, Test) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- FlattenLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
+ FlattenLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int c = 0; c < 3 * 6 * 5; ++c) {
}
}
-TYPED_TEST(FlattenLayerTest, TestGPU) {
+TYPED_TEST(FlattenLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- FlattenLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int c = 0; c < 3 * 6 * 5; ++c) {
- EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
- this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
- EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0),
- this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5));
- }
-}
-
-TYPED_TEST(FlattenLayerTest, TestCPUGradient) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- FlattenLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(FlattenLayerTest, TestGPUGradient) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- FlattenLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ FlattenLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template<typename Dtype>
-class HDF5OutputLayerTest : public ::testing::Test {
+template<typename TypeParam>
+class HDF5OutputLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
HDF5OutputLayerTest()
: output_file_name_(tmpnam(NULL)),
int width_;
};
-template<typename Dtype>
-void HDF5OutputLayerTest<Dtype>::CheckBlobEqual(const Blob<Dtype>& b1,
- const Blob<Dtype>& b2) {
+template<typename TypeParam>
+void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
+ const Blob<Dtype>& b2) {
EXPECT_EQ(b1.num(), b2.num());
EXPECT_EQ(b1.channels(), b2.channels());
EXPECT_EQ(b1.height(), b2.height());
}
}
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HDF5OutputLayerTest, Dtypes);
+TYPED_TEST_CASE(HDF5OutputLayerTest, TestDtypesAndDevices);
-/*
- * TestCPUForward and TestGPUForward are almost identical except for the mode.
- * They are separated to use with `test_all.testbin --gtest_filter="*CPU*"`.
- */
-TYPED_TEST(HDF5OutputLayerTest, TestCPUForward) {
+TYPED_TEST(HDF5OutputLayerTest, TestForward) {
+ typedef typename TypeParam::Dtype Dtype;
LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
this->blob_bottom_vec_.push_back(this->blob_data_);
this->blob_bottom_vec_.push_back(this->blob_label_);
- Caffe::set_mode(Caffe::CPU);
- LayerParameter param;
- param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
- // This code block ensures that the layer is deconstructed and
- // the output hdf5 file is closed.
- {
- HDF5OutputLayer<TypeParam> layer(param);
- EXPECT_EQ(layer.file_name(), this->output_file_name_);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
- }
- file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
- H5P_DEFAULT);
- ASSERT_GE(
- file_id, 0)<< "Failed to open HDF5 file" <<
- this->input_file_name_;
-
- Blob<TypeParam>* blob_data = new Blob<TypeParam>();
- hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
- blob_data);
- this->CheckBlobEqual(*(this->blob_data_), *blob_data);
-
- Blob<TypeParam>* blob_label = new Blob<TypeParam>();
- hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
- blob_label);
- this->CheckBlobEqual(*(this->blob_label_), *blob_label);
-
- status = H5Fclose(file_id);
- EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
- this->output_file_name_;
-}
-
-TYPED_TEST(HDF5OutputLayerTest, TestGPUForward) {
- LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
-
- hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
- H5P_DEFAULT);
- ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" <<
- this->input_file_name_;
- hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
- this->blob_data_);
- hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
- this->blob_label_);
- herr_t status = H5Fclose(file_id);
- EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
- this->input_file_name_;
- this->blob_bottom_vec_.push_back(this->blob_data_);
- this->blob_bottom_vec_.push_back(this->blob_label_);
-
- Caffe::set_mode(Caffe::GPU);
LayerParameter param;
param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
// This code block ensures that the layer is deconstructed and
// the output hdf5 file is closed.
{
- HDF5OutputLayer<TypeParam> layer(param);
+ HDF5OutputLayer<Dtype> layer(param);
EXPECT_EQ(layer.file_name(), this->output_file_name_);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
file_id, 0)<< "Failed to open HDF5 file" <<
this->input_file_name_;
- Blob<TypeParam>* blob_data = new Blob<TypeParam>();
+ Blob<Dtype>* blob_data = new Blob<Dtype>();
hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
blob_data);
this->CheckBlobEqual(*(this->blob_data_), *blob_data);
- Blob<TypeParam>* blob_label = new Blob<TypeParam>();
+ Blob<Dtype>* blob_label = new Blob<Dtype>();
hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
blob_label);
this->CheckBlobEqual(*(this->blob_label_), *blob_label);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class HDF5DataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
HDF5DataLayerTest()
: filename(NULL),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HDF5DataLayerTest, Dtypes);
+TYPED_TEST_CASE(HDF5DataLayerTest, TestDtypesAndDevices);
TYPED_TEST(HDF5DataLayerTest, TestRead) {
+ typedef typename TypeParam::Dtype Dtype;
// Create LayerParameter with the known parameters.
// The data file we are reading has 10 rows and 8 columns,
// with values from 0 to 10*8 reshaped in row-major order.
int width = 5;
// Test that the layer setup got the correct parameters.
- HDF5DataLayer<TypeParam> layer(param);
+ HDF5DataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), batch_size);
EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
EXPECT_EQ(this->blob_top_label_->height(), 1);
EXPECT_EQ(this->blob_top_label_->width(), 1);
- for (int t = 0; t < 2; ++t) {
- // TODO: make this a TypedTest instead of this silly loop.
- if (t == 0) {
- Caffe::set_mode(Caffe::CPU);
- } else {
- Caffe::set_mode(Caffe::GPU);
+ layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+
+ // Go through the data 5 times (10 batches).
+ const int data_size = num_cols * height * width;
+ for (int iter = 0; iter < 10; ++iter) {
+ layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+
+ // On even iterations, we're reading the first half of the data.
+ // On odd iterations, we're reading the second half of the data.
+ int label_offset = (iter % 2 == 0) ? 0 : batch_size;
+ int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
+
+ // Every two iterations we are reading the second file,
+ // which has the same labels, but data is offset by total data size,
+ // which is 2000 (see generate_sample_data).
+ int file_offset = (iter % 4 < 2) ? 0 : 2000;
+
+ for (int i = 0; i < batch_size; ++i) {
+ EXPECT_EQ(
+ label_offset + i,
+ this->blob_top_label_->cpu_data()[i]);
}
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-
- // Go through the data 10 times (5 batches).
- const int data_size = num_cols * height * width;
- for (int iter = 0; iter < 10; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
-
- // On even iterations, we're reading the first half of the data.
- // On odd iterations, we're reading the second half of the data.
- int label_offset = (iter % 2 == 0) ? 0 : batch_size;
- int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
-
- // Every two iterations we are reading the second file,
- // which has the same labels, but data is offset by total data size,
- // which is 2000 (see generate_sample_data).
- int file_offset = (iter % 4 < 2) ? 0 : 2000;
-
- for (int i = 0; i < batch_size; ++i) {
- EXPECT_EQ(
- label_offset + i,
- this->blob_top_label_->cpu_data()[i]);
- }
- for (int i = 0; i < batch_size; ++i) {
- for (int j = 0; j < num_cols; ++j) {
- for (int h = 0; h < height; ++h) {
- for (int w = 0; w < width; ++w) {
- int idx = (
- i * num_cols * height * width +
- j * height * width +
- h * width + w);
- EXPECT_EQ(
- file_offset + data_offset + idx,
- this->blob_top_data_->cpu_data()[idx])
- << "debug: i " << i << " j " << j
- << " iter " << iter << " t " << t;
- }
+ for (int i = 0; i < batch_size; ++i) {
+ for (int j = 0; j < num_cols; ++j) {
+ for (int h = 0; h < height; ++h) {
+ for (int w = 0; w < width; ++w) {
+ int idx = (
+ i * num_cols * height * width +
+ j * height * width +
+ h * width + w);
+ EXPECT_EQ(
+ file_offset + data_offset + idx,
+ this->blob_top_data_->cpu_data()[idx])
+ << "debug: i " << i << " j " << j
+ << " iter " << iter;
}
}
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class HingeLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class HingeLossLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
HingeLossLayerTest()
: blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(HingeLossLayerTest, Dtypes);
+TYPED_TEST_CASE(HingeLossLayerTest, TestDtypesAndDevices);
-TYPED_TEST(HingeLossLayerTest, TestGradientL1CPU) {
+TYPED_TEST(HingeLossLayerTest, TestGradientL1) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- HingeLossLayer<TypeParam> layer(layer_param);
+ HingeLossLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 1, 0.01);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 1, 0.01);
checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_), 0, -1, -1);
}
-TYPED_TEST(HingeLossLayerTest, TestGradientL1GPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- HingeLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 1, 0.01);
- checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0, -1, -1);
-}
-
-
-TYPED_TEST(HingeLossLayerTest, TestGradientL2CPU) {
+TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
// Set norm to L2
HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param();
hinge_loss_param->set_norm(HingeLossParameter_Norm_L2);
- Caffe::set_mode(Caffe::CPU);
- HingeLossLayer<TypeParam> layer(layer_param);
+ HingeLossLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 2e-3, 1701);
+ GradientChecker<Dtype> checker(1e-2, 2e-3, 1701);
checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_), 0, -1, -1);
}
-TYPED_TEST(HingeLossLayerTest, TestGradientL2GPU) {
- LayerParameter layer_param;
- // Set norm to L2
- HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param();
- hinge_loss_param->set_norm(HingeLossParameter_Norm_L2);
- Caffe::set_mode(Caffe::GPU);
- HingeLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 2e-3, 1701);
- checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0, -1, -1);
-}
-
} // namespace caffe
int width_col_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(Im2colKernelTest, Dtypes);
+TYPED_TEST_CASE(Im2colKernelTest, TestDtypes);
TYPED_TEST(Im2colKernelTest, TestGPU) {
Caffe::set_mode(Caffe::GPU);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class Im2colLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class Im2colLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
Im2colLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(Im2colLayerTest, Dtypes);
+TYPED_TEST_CASE(Im2colLayerTest, TestDtypesAndDevices);
TYPED_TEST(Im2colLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_kernel_size(3);
convolution_param->set_stride(2);
- Im2colLayer<TypeParam> layer(layer_param);
+ Im2colLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 27);
EXPECT_EQ(this->blob_top_->width(), 2);
}
-TYPED_TEST(Im2colLayerTest, TestCPU) {
+TYPED_TEST(Im2colLayerTest, TestForward) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_kernel_size(3);
convolution_param->set_stride(2);
- Im2colLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // We are lazy and will only check the top left block
- for (int c = 0; c < 27; ++c) {
- EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
- this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3));
- }
-}
-
-TYPED_TEST(Im2colLayerTest, TestGPU) {
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- Im2colLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
+ Im2colLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// We are lazy and will only check the top left block
}
}
-TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
- LayerParameter layer_param;
- ConvolutionParameter* convolution_param =
- layer_param.mutable_convolution_param();
- convolution_param->set_kernel_size(3);
- convolution_param->set_stride(2);
- Caffe::set_mode(Caffe::CPU);
- Im2colLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
+TYPED_TEST(Im2colLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConvolutionParameter* convolution_param =
layer_param.mutable_convolution_param();
convolution_param->set_kernel_size(3);
convolution_param->set_stride(2);
- Caffe::set_mode(Caffe::GPU);
- Im2colLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ Im2colLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class ImageDataLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ImageDataLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
ImageDataLayerTest()
: seed_(1701),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ImageDataLayerTest, Dtypes);
+TYPED_TEST_CASE(ImageDataLayerTest, TestDtypesAndDevices);
TYPED_TEST(ImageDataLayerTest, TestRead) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter param;
ImageDataParameter* image_data_param = param.mutable_image_data_param();
image_data_param->set_batch_size(5);
image_data_param->set_source(this->filename_->c_str());
image_data_param->set_shuffle(false);
- ImageDataLayer<TypeParam> layer(param);
+ ImageDataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
}
TYPED_TEST(ImageDataLayerTest, TestResize) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter param;
ImageDataParameter* image_data_param = param.mutable_image_data_param();
image_data_param->set_batch_size(5);
image_data_param->set_new_height(256);
image_data_param->set_new_width(256);
image_data_param->set_shuffle(false);
- ImageDataLayer<TypeParam> layer(param);
+ ImageDataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
}
TYPED_TEST(ImageDataLayerTest, TestShuffle) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter param;
ImageDataParameter* image_data_param = param.mutable_image_data_param();
image_data_param->set_batch_size(5);
image_data_param->set_source(this->filename_->c_str());
image_data_param->set_shuffle(true);
- ImageDataLayer<TypeParam> layer(param);
+ ImageDataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
// Go through the data twice
for (int iter = 0; iter < 2; ++iter) {
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
- map<TypeParam, int> values_to_indices;
+ map<Dtype, int> values_to_indices;
int num_in_order = 0;
for (int i = 0; i < 5; ++i) {
- TypeParam value = this->blob_top_label_->cpu_data()[i];
+ Dtype value = this->blob_top_label_->cpu_data()[i];
// Check that the value has not been seen already (no duplicates).
EXPECT_EQ(values_to_indices.find(value), values_to_indices.end());
values_to_indices[value] = i;
- num_in_order += (value == TypeParam(i));
+ num_in_order += (value == Dtype(i));
}
EXPECT_EQ(5, values_to_indices.size());
EXPECT_GT(5, num_in_order);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class InnerProductLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class InnerProductLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
InnerProductLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(InnerProductLayerTest, Dtypes);
+TYPED_TEST_CASE(InnerProductLayerTest, TestDtypesAndDevices);
TYPED_TEST(InnerProductLayerTest, TestSetUp) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
InnerProductParameter* inner_product_param =
layer_param.mutable_inner_product_param();
inner_product_param->set_num_output(10);
- shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
+ shared_ptr<InnerProductLayer<Dtype> > layer(
+ new InnerProductLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->height(), 1);
EXPECT_EQ(this->blob_top_->channels(), 10);
}
-TYPED_TEST(InnerProductLayerTest, TestCPU) {
- LayerParameter layer_param;
- InnerProductParameter* inner_product_param =
- layer_param.mutable_inner_product_param();
- Caffe::set_mode(Caffe::CPU);
- inner_product_param->set_num_output(10);
- inner_product_param->mutable_weight_filler()->set_type("uniform");
- inner_product_param->mutable_bias_filler()->set_type("uniform");
- inner_product_param->mutable_bias_filler()->set_min(1);
- inner_product_param->mutable_bias_filler()->set_max(2);
- shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
- const int count = this->blob_top_->count();
- for (int i = 0; i < count; ++i) {
- EXPECT_GE(data[i], 1.);
- }
-}
-
-TYPED_TEST(InnerProductLayerTest, TestGPU) {
- if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
+TYPED_TEST(InnerProductLayerTest, TestForward) {
+ typedef typename TypeParam::Dtype Dtype;
+ if (Caffe::mode() == Caffe::CPU ||
+ sizeof(Dtype) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
LayerParameter layer_param;
InnerProductParameter* inner_product_param =
layer_param.mutable_inner_product_param();
- Caffe::set_mode(Caffe::GPU);
inner_product_param->set_num_output(10);
inner_product_param->mutable_weight_filler()->set_type("uniform");
inner_product_param->mutable_bias_filler()->set_type("uniform");
inner_product_param->mutable_bias_filler()->set_min(1);
inner_product_param->mutable_bias_filler()->set_max(2);
- shared_ptr<InnerProductLayer<TypeParam> > layer(
- new InnerProductLayer<TypeParam>(layer_param));
+ shared_ptr<InnerProductLayer<Dtype> > layer(
+ new InnerProductLayer<Dtype>(layer_param));
layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* data = this->blob_top_->cpu_data();
+ const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
for (int i = 0; i < count; ++i) {
EXPECT_GE(data[i], 1.);
}
}
-TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
- LayerParameter layer_param;
- InnerProductParameter* inner_product_param =
- layer_param.mutable_inner_product_param();
- Caffe::set_mode(Caffe::CPU);
- inner_product_param->set_num_output(10);
- inner_product_param->mutable_weight_filler()->set_type("gaussian");
- inner_product_param->mutable_bias_filler()->set_type("gaussian");
- inner_product_param->mutable_bias_filler()->set_min(1);
- inner_product_param->mutable_bias_filler()->set_max(2);
- InnerProductLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
- if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
+TYPED_TEST(InnerProductLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ if (Caffe::mode() == Caffe::CPU ||
+ sizeof(Dtype) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
LayerParameter layer_param;
InnerProductParameter* inner_product_param =
layer_param.mutable_inner_product_param();
- Caffe::set_mode(Caffe::GPU);
inner_product_param->set_num_output(10);
inner_product_param->mutable_weight_filler()->set_type("gaussian");
inner_product_param->mutable_bias_filler()->set_type("gaussian");
- InnerProductLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+ inner_product_param->mutable_bias_filler()->set_min(1);
+ inner_product_param->mutable_bias_filler()->set_max(2);
+ InnerProductLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
+ checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
} else {
LOG(ERROR) << "Skipping test due to old architecture.";
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class LRNLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class LRNLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
LRNLayerTest()
: epsilon_(Dtype(1e-5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-template <typename Dtype>
-void LRNLayerTest<Dtype>::ReferenceLRNForward(
+template <typename TypeParam>
+void LRNLayerTest<TypeParam>::ReferenceLRNForward(
const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
Blob<Dtype>* blob_top) {
+ typedef typename TypeParam::Dtype Dtype;
blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
blob_bottom.height(), blob_bottom.width());
Dtype* top_data = blob_top->mutable_cpu_data();
}
}
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(LRNLayerTest, Dtypes);
+TYPED_TEST_CASE(LRNLayerTest, TestDtypesAndDevices);
TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- LRNLayer<TypeParam> layer(layer_param);
+ LRNLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 7);
EXPECT_EQ(this->blob_top_->width(), 3);
}
-TYPED_TEST(LRNLayerTest, TestCPUForwardAcrossChannels) {
- LayerParameter layer_param;
- LRNLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Blob<TypeParam> top_reference;
- this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
- &top_reference);
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i],
- this->epsilon_);
- }
-}
-
-TYPED_TEST(LRNLayerTest, TestGPUForwardAcrossChannels) {
+TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- LRNLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
+ LRNLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Blob<TypeParam> top_reference;
+ Blob<Dtype> top_reference;
this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
&top_reference);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
}
}
-TYPED_TEST(LRNLayerTest, TestCPUGradientAcrossChannels) {
+TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- LRNLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- Caffe::set_mode(Caffe::CPU);
+ LRNLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int i = 0; i < this->blob_top_->count(); ++i) {
&(this->blob_top_vec_));
}
-TYPED_TEST(LRNLayerTest, TestGPUGradientAcrossChannels) {
- LayerParameter layer_param;
- LRNLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- Caffe::set_mode(Caffe::GPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- this->blob_top_->mutable_cpu_diff()[i] = 1.;
- }
- vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
- layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
- // for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i]
- // << std::endl;
- // }
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
layer_param.mutable_lrn_param()->set_norm_region(
LRNParameter_NormRegion_WITHIN_CHANNEL);
layer_param.mutable_lrn_param()->set_local_size(3);
- LRNLayer<TypeParam> layer(layer_param);
+ LRNLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 7);
EXPECT_EQ(this->blob_top_->width(), 3);
}
-TYPED_TEST(LRNLayerTest, TestCPUForwardWithinChannel) {
+TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
layer_param.mutable_lrn_param()->set_norm_region(
LRNParameter_NormRegion_WITHIN_CHANNEL);
layer_param.mutable_lrn_param()->set_local_size(3);
- LRNLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
+ LRNLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Blob<TypeParam> top_reference;
+ Blob<Dtype> top_reference;
this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
&top_reference);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
}
}
-TYPED_TEST(LRNLayerTest, TestGPUForwardWithinChannel) {
- LayerParameter layer_param;
- layer_param.mutable_lrn_param()->set_norm_region(
- LRNParameter_NormRegion_WITHIN_CHANNEL);
- layer_param.mutable_lrn_param()->set_local_size(3);
- LRNLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- Blob<TypeParam> top_reference;
- this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
- &top_reference);
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i],
- this->epsilon_);
- }
-}
-
-TYPED_TEST(LRNLayerTest, TestCPUGradientWithinChannel) {
- LayerParameter layer_param;
- layer_param.mutable_lrn_param()->set_norm_region(
- LRNParameter_NormRegion_WITHIN_CHANNEL);
- layer_param.mutable_lrn_param()->set_local_size(3);
- LRNLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- Caffe::set_mode(Caffe::CPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- this->blob_top_->mutable_cpu_diff()[i] = 1.;
- }
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(LRNLayerTest, TestGPUGradientWithinChannel) {
+TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
layer_param.mutable_lrn_param()->set_norm_region(
LRNParameter_NormRegion_WITHIN_CHANNEL);
layer_param.mutable_lrn_param()->set_local_size(3);
- LRNLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- Caffe::set_mode(Caffe::GPU);
+ LRNLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int i = 0; i < this->blob_top_->count(); ++i) {
Blob<Dtype>* const blob_top_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MathFunctionsTest, Dtypes);
+TYPED_TEST_CASE(MathFunctionsTest, TestDtypes);
TYPED_TEST(MathFunctionsTest, TestNothing) {
// The first test case of a test suite takes the longest time
namespace caffe {
-template <typename Dtype>
-class MaxPoolingDropoutTest : public ::testing::Test {
+template <typename TypeParam>
+class MaxPoolingDropoutTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
MaxPoolingDropoutTest()
: blob_bottom_(new Blob<Dtype>()),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MaxPoolingDropoutTest, Dtypes);
+TYPED_TEST_CASE(MaxPoolingDropoutTest, TestDtypesAndDevices);
TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
- PoolingLayer<TypeParam> max_layer(layer_param);
+ PoolingLayer<Dtype> max_layer(layer_param);
max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- DropoutLayer<TypeParam> dropout_layer(layer_param);
+ DropoutLayer<Dtype> dropout_layer(layer_param);
dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
}
-TYPED_TEST(MaxPoolingDropoutTest, CPUForward) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(MaxPoolingDropoutTest, TestForward) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* top_data = this->blob_top_->cpu_data();
- TypeParam sum = 0.;
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ Dtype sum = 0.;
for (int i = 0; i < this->blob_top_->count(); ++i) {
sum += top_data[i];
}
EXPECT_EQ(sum, this->blob_top_->count());
// Dropout in-place
- DropoutLayer<TypeParam> dropout_layer(layer_param);
+ DropoutLayer<Dtype> dropout_layer(layer_param);
dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
sum = 0.;
- TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
+ Dtype scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_top_->count(); ++i) {
sum += top_data[i];
EXPECT_LE(sum, this->blob_top_->count()*scale);
}
-TYPED_TEST(MaxPoolingDropoutTest, GPUForward) {
- Caffe::set_mode(Caffe::GPU);
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_size(3);
- pooling_param->set_stride(2);
- PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- const TypeParam* top_data = this->blob_top_->cpu_data();
- TypeParam sum = 0.;
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- sum += top_data[i];
- }
- EXPECT_EQ(sum, this->blob_top_->count());
-
- DropoutLayer<TypeParam> dropout_layer(layer_param);
- dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
- dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
- sum = 0.;
- TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
- top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- sum += top_data[i];
- }
- EXPECT_GE(sum, 0);
- EXPECT_LE(sum, this->blob_top_->count()*scale);
-}
-
-TYPED_TEST(MaxPoolingDropoutTest, CPUBackward) {
- Caffe::set_mode(Caffe::CPU);
- Caffe::set_phase(Caffe::TRAIN);
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_size(3);
- pooling_param->set_stride(2);
- PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- this->blob_top_->mutable_cpu_diff()[i] = 1.;
- }
- vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
- layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
- const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff();
- TypeParam sum = 0.;
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- sum += bottom_diff[i];
- }
- EXPECT_EQ(sum, this->blob_top_->count());
- // Dropout in-place
- DropoutLayer<TypeParam> dropout_layer(layer_param);
- dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
- dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
- dropout_layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_top_vec_));
- layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
- TypeParam sum_with_dropout = 0.;
- bottom_diff = this->blob_bottom_->cpu_diff();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- sum_with_dropout += bottom_diff[i];
- }
- EXPECT_GE(sum_with_dropout, sum);
-}
-
-TYPED_TEST(MaxPoolingDropoutTest, GPUBackward) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
+ typedef typename TypeParam::Dtype Dtype;
Caffe::set_phase(Caffe::TRAIN);
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int i = 0; i < this->blob_top_->count(); ++i) {
vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
layer.Backward(this->blob_top_vec_, propagate_down,
&(this->blob_bottom_vec_));
- const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff();
- TypeParam sum = 0.;
+ const Dtype* bottom_diff = this->blob_bottom_->cpu_diff();
+ Dtype sum = 0.;
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
sum += bottom_diff[i];
}
EXPECT_EQ(sum, this->blob_top_->count());
// Dropout in-place
- DropoutLayer<TypeParam> dropout_layer(layer_param);
+ DropoutLayer<Dtype> dropout_layer(layer_param);
dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
dropout_layer.Backward(this->blob_top_vec_, propagate_down,
&(this->blob_top_vec_));
layer.Backward(this->blob_top_vec_, propagate_down,
&(this->blob_bottom_vec_));
- TypeParam sum_with_dropout = 0.;
+ Dtype sum_with_dropout = 0.;
bottom_diff = this->blob_bottom_->cpu_diff();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
sum_with_dropout += bottom_diff[i];
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MemoryDataLayerTest, Dtypes);
+TYPED_TEST_CASE(MemoryDataLayerTest, TestDtypes);
TYPED_TEST(MemoryDataLayerTest, TestSetup) {
LayerParameter layer_param;
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, Dtypes);
+TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes);
TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
namespace caffe {
-template <typename Dtype>
+template <typename TypeParam>
class NetTest : public ::testing::Test {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
NetTest() : seed_(1701) {}
shared_ptr<Net<Dtype> > net_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(NetTest, Dtypes);
+TYPED_TEST_CASE(NetTest, TestDtypesAndDevices);
TYPED_TEST(NetTest, TestHasBlob) {
this->InitTinyNet();
}
TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
+ typedef typename TypeParam::Dtype Dtype;
const bool force_backward = true;
this->InitTinyNet(force_backward);
const vector<vector<bool> >& bottom_need_backward =
}
TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) {
+ typedef typename TypeParam::Dtype Dtype;
const bool force_backward = true;
this->InitTinyNetEuclidean(force_backward);
const vector<vector<bool> >& bottom_need_backward =
}
TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
+ typedef typename TypeParam::Dtype Dtype;
this->InitUnsharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
- TypeParam loss;
+ vector<Blob<Dtype>*> bottom;
+ Dtype loss;
this->net_->Forward(bottom, &loss);
EXPECT_GT(loss, 0);
}
TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
+ typedef typename TypeParam::Dtype Dtype;
this->InitSharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
- TypeParam loss;
+ vector<Blob<Dtype>*> bottom;
+ Dtype loss;
this->net_->Forward(bottom, &loss);
EXPECT_FLOAT_EQ(loss, 0);
}
TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
+ typedef typename TypeParam::Dtype Dtype;
this->InitUnsharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
- Net<TypeParam>* net = this->net_.get();
+ vector<Blob<Dtype>*> bottom;
+ Net<Dtype>* net = this->net_.get();
net->Forward(bottom);
net->Backward();
- Layer<TypeParam>* ip1_layer = net->layer_by_name("innerproduct1").get();
- Layer<TypeParam>* ip2_layer = net->layer_by_name("innerproduct2").get();
+ Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
+ Layer<Dtype>* ip2_layer = net->layer_by_name("innerproduct2").get();
const int count = ip1_layer->blobs()[0]->count();
- const TypeParam* grad1 = ip1_layer->blobs()[0]->cpu_diff();
- const TypeParam* grad2 = ip2_layer->blobs()[0]->cpu_diff();
+ const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff();
+ const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff();
for (int i = 0; i < count; ++i) {
EXPECT_GT(fabs(grad1[i]), 0);
EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]);
}
TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
+ typedef typename TypeParam::Dtype Dtype;
this->InitSharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
- Net<TypeParam>* net = this->net_.get();
- TypeParam loss;
+ vector<Blob<Dtype>*> bottom;
+ Net<Dtype>* net = this->net_.get();
+ Dtype loss;
net->Forward(bottom, &loss);
net->Backward();
EXPECT_FLOAT_EQ(loss, 0);
- Layer<TypeParam>* ip1_layer = net->layer_by_name("innerproduct1").get();
- Layer<TypeParam>* ip2_layer = net->layer_by_name("innerproduct2").get();
+ Layer<Dtype>* ip1_layer = net->layer_by_name("innerproduct1").get();
+ Layer<Dtype>* ip2_layer = net->layer_by_name("innerproduct2").get();
const int count = ip1_layer->blobs()[0]->count();
- const TypeParam* grad1 = ip1_layer->blobs()[0]->cpu_diff();
- const TypeParam* grad2 = ip2_layer->blobs()[0]->cpu_diff();
+ const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff();
+ const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff();
for (int i = 0; i < count; ++i) {
EXPECT_FLOAT_EQ(0, grad1[i]);
EXPECT_FLOAT_EQ(0, grad2[i]);
}
}
-TYPED_TEST(NetTest, TestSharedWeightsUpdateCPU) {
- Caffe::set_random_seed(this->seed_);
- Caffe::set_mode(Caffe::CPU);
- this->InitDiffDataSharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
- EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
- EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
- Blob<TypeParam>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
- Blob<TypeParam>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
- // Check that data blobs of shared weights share the same location in memory.
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
- // Check that diff blobs of shared weights are at different locations in
- // locations. (The diffs should be accumulated at update time.)
- EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
- this->net_->Forward(bottom);
- this->net_->Backward();
- // Compute the expected update as the data minus the two diffs.
- Blob<TypeParam> shared_params;
- const bool reshape = true;
- const bool copy_diff = false;
- shared_params.CopyFrom(*ip1_weights, copy_diff, reshape);
- shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape);
- const int count = ip1_weights->count();
- // Make sure the diffs are non-trivial.
- for (int i = 0; i < count; ++i) {
- EXPECT_NE(0, ip1_weights->cpu_diff()[i]);
- EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
- EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
- }
- caffe_axpy(count, TypeParam(1), ip2_weights->cpu_diff(),
- shared_params.mutable_cpu_diff());
- caffe_axpy(count, TypeParam(-1), shared_params.cpu_diff(),
- shared_params.mutable_cpu_data());
- const TypeParam* expected_updated_params = shared_params.cpu_data();
- this->net_->Update();
- const TypeParam* actual_updated_params = ip1_weights->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]);
- }
- // Check that data blobs of shared weights STILL point to the same memory
- // location (because ... who knows).
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
-
- Caffe::set_random_seed(this->seed_);
- this->InitDiffDataUnsharedWeightsNet();
- EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
- EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
- ip1_weights = this->net_->layers()[1]->blobs()[0].get();
- ip2_weights = this->net_->layers()[2]->blobs()[0].get();
- // Check that data and diff blobs of unshared weights are at different
- // locations in memory.
- EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data());
- EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff());
- this->net_->Forward(bottom);
- this->net_->Backward();
- // Compute the expected update.
- Blob<TypeParam> unshared_params1;
- unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape);
- unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape);
- Blob<TypeParam> unshared_params2;
- unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape);
- unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape);
- // Make sure the diffs are non-trivial and sum to the diff in the shared net.
- for (int i = 0; i < count; ++i) {
- EXPECT_NE(0, ip1_weights->cpu_diff()[i]);
- EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
- EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
- EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i],
- shared_params.cpu_diff()[i]);
- }
- caffe_axpy(count, TypeParam(-1), ip1_weights->cpu_diff(),
- unshared_params1.mutable_cpu_data());
- caffe_axpy(count, TypeParam(-1), ip2_weights->cpu_diff(),
- unshared_params2.mutable_cpu_data());
- const TypeParam* expected_updated_params1 = unshared_params1.cpu_data();
- const TypeParam* expected_updated_params2 = unshared_params2.cpu_data();
- this->net_->Update();
- const TypeParam* actual_updated_params1 = ip1_weights->cpu_data();
- const TypeParam* actual_updated_params2 = ip2_weights->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]);
- EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]);
- EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]);
- EXPECT_NE(expected_updated_params, expected_updated_params1);
- }
-}
-
-TYPED_TEST(NetTest, TestSharedWeightsUpdateGPU) {
+TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
+ typedef typename TypeParam::Dtype Dtype;
Caffe::set_random_seed(this->seed_);
- Caffe::set_mode(Caffe::GPU);
this->InitDiffDataSharedWeightsNet();
- vector<Blob<TypeParam>*> bottom;
+ vector<Blob<Dtype>*> bottom;
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1");
EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2");
- Blob<TypeParam>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
- Blob<TypeParam>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
+ Blob<Dtype>* ip1_weights = this->net_->layers()[1]->blobs()[0].get();
+ Blob<Dtype>* ip2_weights = this->net_->layers()[2]->blobs()[0].get();
// Check that data blobs of shared weights share the same location in memory.
EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data());
// Check that diff blobs of shared weights are at different locations in
this->net_->Forward(bottom);
this->net_->Backward();
// Compute the expected update as the data minus the two diffs.
- Blob<TypeParam> shared_params;
+ Blob<Dtype> shared_params;
const bool reshape = true;
const bool copy_diff = false;
shared_params.CopyFrom(*ip1_weights, copy_diff, reshape);
EXPECT_NE(0, ip2_weights->cpu_diff()[i]);
EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]);
}
- caffe_axpy(count, TypeParam(1), ip2_weights->cpu_diff(),
+ caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(),
shared_params.mutable_cpu_diff());
- caffe_axpy(count, TypeParam(-1), shared_params.cpu_diff(),
+ caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(),
shared_params.mutable_cpu_data());
- const TypeParam* expected_updated_params = shared_params.cpu_data();
+ const Dtype* expected_updated_params = shared_params.cpu_data();
this->net_->Update();
- const TypeParam* actual_updated_params = ip1_weights->cpu_data();
+ const Dtype* actual_updated_params = ip1_weights->cpu_data();
for (int i = 0; i < count; ++i) {
EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]);
}
this->net_->Forward(bottom);
this->net_->Backward();
// Compute the expected update.
- Blob<TypeParam> unshared_params1;
+ Blob<Dtype> unshared_params1;
unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape);
unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape);
- Blob<TypeParam> unshared_params2;
+ Blob<Dtype> unshared_params2;
unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape);
unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape);
// Make sure the diffs are non-trivial and sum to the diff in the shared net.
EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i],
shared_params.cpu_diff()[i]);
}
- caffe_axpy(count, TypeParam(-1), ip1_weights->cpu_diff(),
+ caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(),
unshared_params1.mutable_cpu_data());
- caffe_axpy(count, TypeParam(-1), ip2_weights->cpu_diff(),
+ caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(),
unshared_params2.mutable_cpu_data());
- const TypeParam* expected_updated_params1 = unshared_params1.cpu_data();
- const TypeParam* expected_updated_params2 = unshared_params2.cpu_data();
+ const Dtype* expected_updated_params1 = unshared_params1.cpu_data();
+ const Dtype* expected_updated_params2 = unshared_params2.cpu_data();
this->net_->Update();
- const TypeParam* actual_updated_params1 = ip1_weights->cpu_data();
- const TypeParam* actual_updated_params2 = ip2_weights->cpu_data();
+ const Dtype* actual_updated_params1 = ip1_weights->cpu_data();
+ const Dtype* actual_updated_params2 = ip2_weights->cpu_data();
for (int i = 0; i < count; ++i) {
EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]);
EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class NeuronLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class NeuronLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
NeuronLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(NeuronLayerTest, Dtypes);
+TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices);
-TYPED_TEST(NeuronLayerTest, TestReLUCPU) {
+TYPED_TEST(NeuronLayerTest, TestReLU) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- ReLULayer<TypeParam> layer(layer_param);
+ ReLULayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
}
}
-
-TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- ReLULayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestReLUGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- ReLULayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_GE(top_data[i], 0.);
- EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
- }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- ReLULayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) {
+TYPED_TEST(NeuronLayerTest, TestReLUGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SigmoidLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
- // check that we squashed the value between 0 and 1
- EXPECT_GE(top_data[i], 0.);
- EXPECT_LE(top_data[i], 1.);
- }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SigmoidLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+ ReLULayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
+TYPED_TEST(NeuronLayerTest, TestSigmoid) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SigmoidLayer<TypeParam> layer(layer_param);
+ SigmoidLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
// check that we squashed the value between 0 and 1
}
}
-
-TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) {
+TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SigmoidLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+ SigmoidLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-
-
-TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
+TYPED_TEST(NeuronLayerTest, TestDropout) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
Caffe::set_phase(Caffe::TRAIN);
- DropoutLayer<TypeParam> layer(layer_param);
+ DropoutLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
if (top_data[i] != 0) {
}
}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- Caffe::set_phase(Caffe::TRAIN);
- DropoutLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPUTest) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- Caffe::set_phase(Caffe::TEST);
- DropoutLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
+TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
Caffe::set_phase(Caffe::TEST);
- DropoutLayer<TypeParam> layer(layer_param);
+ DropoutLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
if (top_data[i] != 0) {
EXPECT_EQ(top_data[i], bottom_data[i]);
}
}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGPU) {
+TYPED_TEST(NeuronLayerTest, TestDropoutGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
Caffe::set_phase(Caffe::TRAIN);
- DropoutLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- if (top_data[i] != 0) {
- EXPECT_EQ(top_data[i], bottom_data[i] * scale);
- }
- }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- Caffe::set_phase(Caffe::TRAIN);
- DropoutLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- // it is too expensive to call curand multiple times, so we don't do an
- // exhaustive gradient check.
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPUTest) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- Caffe::set_phase(Caffe::TEST);
- DropoutLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- // it is too expensive to call curand multiple times, so we don't do an
- // exhaustive gradient check.
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ DropoutLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
+ checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
+ &(this->blob_top_vec_));
}
-
-TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
+TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
Caffe::set_phase(Caffe::TEST);
- DropoutLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- if (top_data[i] != 0) {
- EXPECT_EQ(top_data[i], bottom_data[i]);
- }
- }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestBNLLCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- BNLLLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_GE(top_data[i], 0.);
- EXPECT_GE(top_data[i], bottom_data[i]);
- }
-}
-
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- BNLLLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ DropoutLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGPU) {
+TYPED_TEST(NeuronLayerTest, TestBNLL) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- BNLLLayer<TypeParam> layer(layer_param);
+ BNLLLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_GE(top_data[i], bottom_data[i]);
}
}
-
-TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) {
+TYPED_TEST(NeuronLayerTest, TestBNLLGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- BNLLLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ BNLLLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class PoolingLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class PoolingLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
PoolingLayerTest()
: blob_bottom_(new Blob<Dtype>()),
}
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(PoolingLayerTest, Dtypes);
+TYPED_TEST_CASE(PoolingLayerTest, TestDtypesAndDevices);
TYPED_TEST(PoolingLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
}
TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
pooling_param->set_pad(1);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
}
/*
-TYPED_TEST(PoolingLayerTest, PrintGPUBackward) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_size(3);
- pooling_param->set_stride(2);
- pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::GPU);
- PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
-
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl;
- }
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl;
- }
-
- for (int i = 0; i < this->blob_top_->count(); ++i) {
- this->blob_top_->mutable_cpu_diff()[i] = 1.;
- }
- layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl;
- }
-}
-*/
-
-/*
-TYPED_TEST(PoolingLayerTest, PrintCPUBackward) {
+TYPED_TEST(PoolingLayerTest, PrintBackward) {
LayerParameter layer_param;
layer_param.set_kernelsize(3);
layer_param.set_stride(2);
layer_param.set_pool(LayerParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::CPU);
PoolingLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
}
*/
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMax) {
- Caffe::set_mode(Caffe::CPU);
- this->TestForwardSquare();
- this->TestForwardRectHigh();
- this->TestForwardRectWide();
-}
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMax) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(PoolingLayerTest, TestForwardMax) {
this->TestForwardSquare();
this->TestForwardRectHigh();
this->TestForwardRectWide();
}
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxTopMask) {
- Caffe::set_mode(Caffe::CPU);
+TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) {
this->blob_top_vec_.push_back(this->blob_top_mask_);
this->TestForwardSquare();
this->TestForwardRectHigh();
this->TestForwardRectWide();
}
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxTopMask) {
- Caffe::set_mode(Caffe::GPU);
- this->blob_top_vec_.push_back(this->blob_top_mask_);
- this->TestForwardSquare();
- this->TestForwardRectHigh();
- this->TestForwardRectWide();
-}
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
- for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
- for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_h(kernel_h);
- pooling_param->set_kernel_w(kernel_w);
- pooling_param->set_stride(2);
- pooling_param->set_pad(1);
- pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::CPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
- }
- }
-}
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
+TYPED_TEST(PoolingLayerTest, TestGradientMax) {
+ typedef typename TypeParam::Dtype Dtype;
for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
LayerParameter layer_param;
pooling_param->set_stride(2);
pooling_param->set_pad(1);
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::GPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-4, 1e-2);
+ PoolingLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-4, 1e-2);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
}
}
-TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxPadded) {
+TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
pooling_param->set_pad(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::CPU);
this->blob_bottom_->Reshape(1, 1, 3, 3);
// Input:
// [ 1 2 4 ]
this->blob_bottom_->mutable_cpu_data()[6] = 4;
this->blob_bottom_->mutable_cpu_data()[7] = 2;
this->blob_bottom_->mutable_cpu_data()[8] = 1;
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- TypeParam epsilon = 1e-8;
+ Dtype epsilon = 1e-8;
// Output:
// [ 1 4 4 ]
// [ 4 4 4 ]
EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon);
}
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxPadded) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_size(3);
- pooling_param->set_stride(2);
- pooling_param->set_pad(2);
- pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- Caffe::set_mode(Caffe::GPU);
- this->blob_bottom_->Reshape(1, 1, 3, 3);
- // Input:
- // [ 1 2 4 ]
- // [ 2 3 2 ]
- // [ 4 2 1 ]
- this->blob_bottom_->mutable_cpu_data()[0] = 1;
- this->blob_bottom_->mutable_cpu_data()[1] = 2;
- this->blob_bottom_->mutable_cpu_data()[2] = 4;
- this->blob_bottom_->mutable_cpu_data()[3] = 2;
- this->blob_bottom_->mutable_cpu_data()[4] = 3;
- this->blob_bottom_->mutable_cpu_data()[5] = 2;
- this->blob_bottom_->mutable_cpu_data()[6] = 4;
- this->blob_bottom_->mutable_cpu_data()[7] = 2;
- this->blob_bottom_->mutable_cpu_data()[8] = 1;
- PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- EXPECT_EQ(this->blob_top_->num(), 1);
- EXPECT_EQ(this->blob_top_->channels(), 1);
- EXPECT_EQ(this->blob_top_->height(), 3);
- EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- TypeParam epsilon = 1e-8;
- // Output:
- // [ 1 4 4 ]
- // [ 4 4 4 ]
- // [ 4 4 1 ]
- EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon);
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientMaxTopMask) {
+TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) {
+ typedef typename TypeParam::Dtype Dtype;
for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
LayerParameter layer_param;
pooling_param->set_stride(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
this->blob_top_vec_.push_back(this->blob_top_mask_);
- Caffe::set_mode(Caffe::CPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-4, 1e-2);
+ PoolingLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-4, 1e-2);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
this->blob_top_vec_.pop_back();
}
}
-TYPED_TEST(PoolingLayerTest, TestGPUGradientMaxTopMask) {
- for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
- for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_h(kernel_h);
- pooling_param->set_kernel_w(kernel_w);
- pooling_param->set_stride(2);
- pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
- this->blob_top_vec_.push_back(this->blob_top_mask_);
- Caffe::set_mode(Caffe::GPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
- this->blob_top_vec_.pop_back();
- }
- }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUForwardAve) {
+TYPED_TEST(PoolingLayerTest, TestForwardAve) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
pooling_param->set_kernel_size(3);
pooling_param->set_stride(1);
pooling_param->set_pad(1);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::CPU);
this->blob_bottom_->Reshape(1, 1, 3, 3);
FillerParameter filler_param;
- filler_param.set_value(TypeParam(2));
- ConstantFiller<TypeParam> filler(filler_param);
+ filler_param.set_value(Dtype(2));
+ ConstantFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
- PoolingLayer<TypeParam> layer(layer_param);
+ PoolingLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- TypeParam epsilon = 1e-5;
+ Dtype epsilon = 1e-5;
EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon);
EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon);
}
-
-TYPED_TEST(PoolingLayerTest, TestGPUForwardAve) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_size(3);
- pooling_param->set_stride(1);
- pooling_param->set_pad(1);
- pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::GPU);
- this->blob_bottom_->Reshape(1, 1, 3, 3);
- FillerParameter filler_param;
- filler_param.set_value(TypeParam(2));
- ConstantFiller<TypeParam> filler(filler_param);
- filler.Fill(this->blob_bottom_);
- PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- EXPECT_EQ(this->blob_top_->num(), 1);
- EXPECT_EQ(this->blob_top_->channels(), 1);
- EXPECT_EQ(this->blob_top_->height(), 3);
- EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- TypeParam epsilon = 1e-5;
- EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon);
- EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon);
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
- for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
- for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_h(kernel_h);
- pooling_param->set_kernel_w(kernel_w);
- pooling_param->set_stride(2);
- pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::CPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
- }
- }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) {
- for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
- for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
- LayerParameter layer_param;
- PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
- pooling_param->set_kernel_h(kernel_h);
- pooling_param->set_kernel_w(kernel_w);
- pooling_param->set_stride(2);
- pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::GPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
- }
- }
-}
-
-
-TYPED_TEST(PoolingLayerTest, TestCPUGradientAvePadded) {
+TYPED_TEST(PoolingLayerTest, TestGradientAve) {
+ typedef typename TypeParam::Dtype Dtype;
for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
LayerParameter layer_param;
pooling_param->set_kernel_h(kernel_h);
pooling_param->set_kernel_w(kernel_w);
pooling_param->set_stride(2);
- pooling_param->set_pad(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::CPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ PoolingLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
}
}
-
-TYPED_TEST(PoolingLayerTest, TestGPUGradientAvePadded) {
+TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) {
+ typedef typename TypeParam::Dtype Dtype;
for (int kernel_h = 3; kernel_h <= 4; kernel_h++) {
for (int kernel_w = 3; kernel_w <= 4; kernel_w++) {
LayerParameter layer_param;
pooling_param->set_stride(2);
pooling_param->set_pad(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
- Caffe::set_mode(Caffe::GPU);
- PoolingLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ PoolingLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class PowerLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class PowerLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
PowerLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(PowerLayerTest, Dtypes);
-
-TYPED_TEST(PowerLayerTest, TestPowerCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = 0.0;
- this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerZeroCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 0.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerZeroGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 0.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerOneCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 1.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerOneGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 1.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.34;
- TypeParam shift = -2.4;
- this->TestForward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
- this->TestBackward(power, scale, shift);
-}
-
-TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientCPU) {
- Caffe::set_mode(Caffe::CPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.5;
- TypeParam shift = -2.4;
- this->TestBackward(power, scale, shift);
-}
+TYPED_TEST_CASE(PowerLayerTest, TestDtypesAndDevices);
-TYPED_TEST(PowerLayerTest, TestPowerGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPower) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 0.37;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestForward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 0.37;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestBackward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 0.37;
- TypeParam scale = 0.83;
- TypeParam shift = 0.0;
+TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 0.37;
+ Dtype scale = 0.83;
+ Dtype shift = 0.0;
this->TestBackward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerZeroGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 0.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerZero) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 0.0;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestForward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerZeroGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 0.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 0.0;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestBackward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerOneGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 1.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerOne) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 1.0;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestForward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerOneGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 1.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerOneGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 1.0;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestBackward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerTwoGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.34;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwo) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 2.0;
+ Dtype scale = 0.34;
+ Dtype shift = -2.4;
this->TestForward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerTwoGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.83;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 2.0;
+ Dtype scale = 0.83;
+ Dtype shift = -2.4;
this->TestBackward(power, scale, shift);
}
-TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientGPU) {
- Caffe::set_mode(Caffe::GPU);
- TypeParam power = 2.0;
- TypeParam scale = 0.5;
- TypeParam shift = -2.4;
+TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ Dtype power = 2.0;
+ Dtype scale = 0.5;
+ Dtype shift = -2.4;
this->TestBackward(power, scale, shift);
}
shared_ptr<SyncedMemory> int_data_2_;
};
-
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(RandomNumberGeneratorTest, Dtypes);
-
+TYPED_TEST_CASE(RandomNumberGeneratorTest, TestDtypes);
TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) {
const TypeParam mu = 0;
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class SigmoidCrossEntropyLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
SigmoidCrossEntropyLossLayerTest()
: blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, Dtypes);
-
-
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossCPU) {
- Caffe::set_mode(Caffe::CPU);
- this->TestForward();
-}
+TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, TestDtypesAndDevices);
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossGPU) {
- Caffe::set_mode(Caffe::GPU);
+TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLoss) {
this->TestForward();
}
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SigmoidCrossEntropyLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0, -1, -1);
-}
-
-TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientGPU) {
+TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SigmoidCrossEntropyLossLayer<TypeParam> layer(layer_param);
+ SigmoidCrossEntropyLossLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+ GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_), 0, -1, -1);
}
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SoftmaxLayerTest, Dtypes);
+TYPED_TEST_CASE(SoftmaxLayerTest, TestDtypes);
-TYPED_TEST(SoftmaxLayerTest, TestForwardCPU) {
+TYPED_TEST(SoftmaxLayerTest, TestForward) {
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
SoftmaxLayer<TypeParam> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
}
}
-TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) {
+TYPED_TEST(SoftmaxLayerTest, TestGradient) {
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
SoftmaxLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class SoftmaxWithLossLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SoftmaxWithLossLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
SoftmaxWithLossLayerTest()
: blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SoftmaxWithLossLayerTest, Dtypes);
-
+TYPED_TEST_CASE(SoftmaxWithLossLayerTest, TestDtypesAndDevices);
-TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientCPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SoftmaxWithLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0, -1, -1);
-}
-TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) {
+TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SoftmaxWithLossLayer<TypeParam> layer(layer_param);
+ SoftmaxWithLossLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701);
+ GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_), 0, -1, -1);
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class SplitLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class SplitLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
+
protected:
SplitLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(SplitLayerTest, Dtypes);
+TYPED_TEST_CASE(SplitLayerTest, TestDtypesAndDevices);
TYPED_TEST(SplitLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- SplitLayer<TypeParam> layer(layer_param);
+ SplitLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_a_->num(), 2);
EXPECT_EQ(this->blob_top_a_->channels(), 3);
EXPECT_EQ(this->blob_top_b_->width(), 5);
}
-TYPED_TEST(SplitLayerTest, TestCPU) {
+TYPED_TEST(SplitLayerTest, Test) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- SplitLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
+ SplitLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
+ Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
}
}
-TYPED_TEST(SplitLayerTest, TestGPU) {
- LayerParameter layer_param;
- SplitLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
- EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
- EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
- }
-}
-
-TYPED_TEST(SplitLayerTest, TestCPUInPlace) {
- LayerParameter layer_param;
- SplitLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::CPU);
- this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
- EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
- }
-}
-
-TYPED_TEST(SplitLayerTest, TestGPUInPlace) {
+TYPED_TEST(SplitLayerTest, TestInPlace) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- SplitLayer<TypeParam> layer(layer_param);
- Caffe::set_mode(Caffe::GPU);
+ SplitLayer<Dtype> layer(layer_param);
this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- TypeParam bottom_value = this->blob_bottom_->cpu_data()[i];
+ Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]);
}
}
-TYPED_TEST(SplitLayerTest, TestCPUGradient) {
+TYPED_TEST(SplitLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SplitLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(SplitLayerTest, TestGPUGradient) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SplitLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(SplitLayerTest, TestCPUGradientInPlace) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- SplitLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
- this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
+ SplitLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
-TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) {
+TYPED_TEST(SplitLayerTest, TestGradientInPlace) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- SplitLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-2);
+ SplitLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-2);
this->blob_top_vec_[0] = this->blob_bottom_vec_[0];
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(StochasticPoolingLayerTest, Dtypes);
+TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes);
TYPED_TEST(StochasticPoolingLayerTest, TestSetup) {
LayerParameter layer_param;
delete p_mem;
}
-TEST_F(SyncedMemoryTest, TestAllocation) {
+TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) {
SyncedMemory mem(10);
EXPECT_TRUE(mem.cpu_data());
EXPECT_TRUE(mem.gpu_data());
EXPECT_TRUE(mem.mutable_gpu_data());
}
+TEST_F(SyncedMemoryTest, TestAllocationCPU) {
+ SyncedMemory mem(10);
+ EXPECT_TRUE(mem.cpu_data());
+ EXPECT_TRUE(mem.mutable_cpu_data());
+}
+
+TEST_F(SyncedMemoryTest, TestAllocationGPU) {
+ SyncedMemory mem(10);
+ EXPECT_TRUE(mem.gpu_data());
+ EXPECT_TRUE(mem.mutable_gpu_data());
+}
+
TEST_F(SyncedMemoryTest, TestCPUWrite) {
SyncedMemory mem(10);
void* cpu_data = mem.mutable_cpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
memset(cpu_data, 1, mem.size());
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 1);
+ EXPECT_EQ((static_cast<char*>(cpu_data))[i], 1);
}
// do another round
cpu_data = mem.mutable_cpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
memset(cpu_data, 2, mem.size());
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 2);
+ EXPECT_EQ((static_cast<char*>(cpu_data))[i], 2);
}
}
char* recovered_value = new char[10];
caffe_gpu_memcpy(10, gpu_data, recovered_value);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 1);
+ EXPECT_EQ((static_cast<char*>(recovered_value))[i], 1);
}
// do another round
cpu_data = mem.mutable_cpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU);
memset(cpu_data, 2, mem.size());
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<char*>(cpu_data))[i], 2);
+ EXPECT_EQ((static_cast<char*>(cpu_data))[i], 2);
}
gpu_data = mem.gpu_data();
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
// check if values are the same
caffe_gpu_memcpy(10, gpu_data, recovered_value);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<char*>(recovered_value))[i], 2);
+ EXPECT_EQ((static_cast<char*>(recovered_value))[i], 2);
}
delete[] recovered_value;
}
CUDA_CHECK(cudaMemset(gpu_data, 1, mem.size()));
const void* cpu_data = mem.cpu_data();
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 1);
+ EXPECT_EQ((static_cast<const char*>(cpu_data))[i], 1);
}
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
CUDA_CHECK(cudaMemset(gpu_data, 2, mem.size()));
cpu_data = mem.cpu_data();
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((reinterpret_cast<const char*>(cpu_data))[i], 2);
+ EXPECT_EQ((static_cast<const char*>(cpu_data))[i], 2);
}
EXPECT_EQ(mem.head(), SyncedMemory::SYNCED);
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class TanHLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class TanHLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
TanHLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 10, 1, 1)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(TanHLayerTest, Dtypes);
+TYPED_TEST_CASE(TanHLayerTest, TestDtypesAndDevices);
-TYPED_TEST(TanHLayerTest, TestForwardCPU) {
+TYPED_TEST(TanHLayerTest, TestForward) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- TanHLayer<TypeParam> layer(layer_param);
+ TanHLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Test exact values
}
}
-TYPED_TEST(TanHLayerTest, TestGradientCPU) {
+TYPED_TEST(TanHLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- TanHLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
-}
-
-TYPED_TEST(TanHLayerTest, TestForwardGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- TanHLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Test exact values
- for (int i = 0; i < this->blob_bottom_->num(); ++i) {
- for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
- for (int k = 0; k < this->blob_bottom_->height(); ++k) {
- for (int l = 0; l < this->blob_bottom_->width(); ++l) {
- EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
- (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
- (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
- EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
- (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
- (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
- }
- }
- }
- }
-}
-
-TYPED_TEST(TanHLayerTest, TestGradientGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- TanHLayer<TypeParam> layer(layer_param);
- GradientChecker<TypeParam> checker(1e-2, 1e-3);
+ TanHLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3);
checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
&(this->blob_top_vec_));
}
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-template <typename Dtype>
-class ThresholdLayerTest : public ::testing::Test {
+template <typename TypeParam>
+class ThresholdLayerTest : public MultiDeviceTest<TypeParam> {
+ typedef typename TypeParam::Dtype Dtype;
protected:
ThresholdLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
vector<Blob<Dtype>*> blob_top_vec_;
};
-typedef ::testing::Types<float, double> Dtypes;
-TYPED_TEST_CASE(ThresholdLayerTest, Dtypes);
+TYPED_TEST_CASE(ThresholdLayerTest, TestDtypesAndDevices);
TYPED_TEST(ThresholdLayerTest, TestSetup) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- ThresholdLayer<TypeParam> layer(layer_param);
+ ThresholdLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_->width());
}
-TYPED_TEST(ThresholdLayerTest, TestCPU) {
+TYPED_TEST(ThresholdLayerTest, Test) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
- ThresholdLayer<TypeParam> layer(layer_param);
+ ThresholdLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- const TypeParam threshold_ = layer_param.threshold_param().threshold();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ const Dtype threshold_ = layer_param.threshold_param().threshold();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
EXPECT_LE(top_data[i], 1.);
}
}
-TYPED_TEST(ThresholdLayerTest, TestCPU2) {
+TYPED_TEST(ThresholdLayerTest, Test2) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
- Caffe::set_mode(Caffe::CPU);
ThresholdParameter* threshold_param =
layer_param.mutable_threshold_param();
threshold_param->set_threshold(0.5);
- ThresholdLayer<TypeParam> layer(layer_param);
+ ThresholdLayer<Dtype> layer(layer_param);
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
// Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- const TypeParam threshold_ = layer_param.threshold_param().threshold();
- EXPECT_FLOAT_EQ(threshold_, 0.5);
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_GE(top_data[i], 0.);
- EXPECT_LE(top_data[i], 1.);
- if (top_data[i] == 0) {
- EXPECT_LE(bottom_data[i], threshold_);
- }
- if (top_data[i] == 1) {
- EXPECT_GT(bottom_data[i], threshold_);
- }
- }
-}
-
-TYPED_TEST(ThresholdLayerTest, TestGPU) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- ThresholdLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- const TypeParam threshold_ = layer_param.threshold_param().threshold();
- for (int i = 0; i < this->blob_bottom_->count(); ++i) {
- EXPECT_GE(top_data[i], 0.);
- EXPECT_LE(top_data[i], 1.);
- if (top_data[i] == 0) {
- EXPECT_LE(bottom_data[i], threshold_);
- }
- if (top_data[i] == 1) {
- EXPECT_GT(bottom_data[i], threshold_);
- }
- }
-}
-
-TYPED_TEST(ThresholdLayerTest, TestGPU2) {
- LayerParameter layer_param;
- Caffe::set_mode(Caffe::GPU);
- ThresholdParameter* threshold_param =
- layer_param.mutable_threshold_param();
- threshold_param->set_threshold(0.5);
- ThresholdLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
- // Now, check values
- const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
- const TypeParam* top_data = this->blob_top_->cpu_data();
- const TypeParam threshold_ = layer_param.threshold_param().threshold();
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ const Dtype threshold_ = layer_param.threshold_param().threshold();
EXPECT_FLOAT_EQ(threshold_, 0.5);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_GE(top_data[i], 0.);
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
-typedef ::testing::Types<float, double> Dtypes;
-
-template <typename Dtype>
+template <typename TypeParam>
class GemmTest : public ::testing::Test {};
-TYPED_TEST_CASE(GemmTest, Dtypes);
+TYPED_TEST_CASE(GemmTest, TestDtypes);
-TYPED_TEST(GemmTest, TestGemm) {
+TYPED_TEST(GemmTest, TestGemmCPUGPU) {
Blob<TypeParam> A(1, 1, 2, 3);
Blob<TypeParam> B(1, 1, 3, 4);
Blob<TypeParam> C(1, 1, 2, 4);
}
-TYPED_TEST(GemmTest, TestGemv) {
+TYPED_TEST(GemmTest, TestGemvCPUGPU) {
Blob<TypeParam> A(1, 1, 2, 3);
Blob<TypeParam> x(1, 1, 1, 3);
Blob<TypeParam> y(1, 1, 1, 2);