From c5dd18f9a8d06df3ea4f853e3da4062cc162b615 Mon Sep 17 00:00:00 2001
From: Jeff Donahue
Date: Sun, 6 Apr 2014 23:50:43 -0700
Subject: [PATCH] separate CPU from GPU ConvolutionLayerTests

---
 src/caffe/test/test_convolution_layer.cpp | 76 +++++++++++++++++++++++++------
 1 file changed, 62 insertions(+), 14 deletions(-)

diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp
index 1f8b076..b08486e 100644
--- a/src/caffe/test/test_convolution_layer.cpp
+++ b/src/caffe/test/test_convolution_layer.cpp
@@ -69,7 +69,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_->width(), 1);
 }
 
-TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
+TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) {
   // We will simply see if the convolution layer carries out averaging well.
   FillerParameter filler_param;
   filler_param.set_value(1.);
@@ -93,21 +93,39 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
   // After the convolution, the output should all have output values 27.1
   const TypeParam* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
-    EXPECT_GE(top_data[i], 27.1 - 1e-4);
-    EXPECT_LE(top_data[i], 27.1 + 1e-4);
+    EXPECT_NEAR(top_data[i], 27.1, 1e-4);
   }
-  // Test GPU
+}
+
+TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolution) {
+  // We will simply see if the convolution layer carries out averaging well.
+  FillerParameter filler_param;
+  filler_param.set_value(1.);
+  ConstantFiller<TypeParam> filler(filler_param);
+  filler.Fill(this->blob_bottom_);
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(4);
+  convolution_param->mutable_weight_filler()->set_type("constant");
+  convolution_param->mutable_weight_filler()->set_value(1);
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
+  shared_ptr<Layer<TypeParam> > layer(
+      new ConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   Caffe::set_mode(Caffe::GPU);
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // After the convolution, the output should all have output values 27.1
-  top_data = this->blob_top_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
   for (int i = 0; i < this->blob_top_->count(); ++i) {
-    EXPECT_GE(top_data[i], 27.1 - 1e-4);
-    EXPECT_LE(top_data[i], 27.1 + 1e-4);
+    EXPECT_NEAR(top_data[i], 27.1, 1e-4);
   }
 }
 
-TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
+TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolutionGroup) {
   // We will simply see if the convolution layer carries out averaging well.
   FillerParameter filler_param;
   filler_param.set_value(1.);
@@ -146,24 +164,54 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
       for (int h = 0; h < this->blob_top_->height(); ++h) {
         for (int w = 0; w < this->blob_top_->width(); ++w) {
           TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
-          EXPECT_GE(data, c * 9 + 0.1 - 1e-4);
-          EXPECT_LE(data, c * 9 + 0.1 + 1e-4);
+          EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
+        }
+      }
+    }
+  }
+}
+
+
+TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) {
+  // We will simply see if the convolution layer carries out averaging well.
+  FillerParameter filler_param;
+  filler_param.set_value(1.);
+  ConstantFiller<TypeParam> filler(filler_param);
+  filler.Fill(this->blob_bottom_);
+  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  for (int n = 0; n < this->blob_bottom_->num(); ++n) {
+    for (int c = 0; c < this->blob_bottom_->channels(); ++c) {
+      for (int h = 0; h < this->blob_bottom_->height(); ++h) {
+        for (int w = 0; w < this->blob_bottom_->width(); ++w) {
+          bottom_data[this->blob_bottom_->offset(n, c, h, w)] = c;
         }
       }
     }
   }
-  // Test GPU
+  LayerParameter layer_param;
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("constant");
+  convolution_param->mutable_weight_filler()->set_value(1);
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
+  shared_ptr<Layer<TypeParam> > layer(
+      new ConvolutionLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   Caffe::set_mode(Caffe::GPU);
   layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // After the convolution, the output should all have output values 9.1
-  top_data = this->blob_top_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
   for (int n = 0; n < this->blob_top_->num(); ++n) {
     for (int c = 0; c < this->blob_top_->channels(); ++c) {
       for (int h = 0; h < this->blob_top_->height(); ++h) {
         for (int w = 0; w < this->blob_top_->width(); ++w) {
           TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)];
-          EXPECT_GE(data, c * 9 + 0.1 - 1e-4);
-          EXPECT_LE(data, c * 9 + 0.1 + 1e-4);
+          EXPECT_NEAR(data, c * 9 + 0.1, 1e-4);
         }
       }
     }
-- 
2.7.4