From 1816561936b8173f78659850de4e8e1da677bd7a Mon Sep 17 00:00:00 2001
From: Jeff Donahue
Date: Fri, 14 Mar 2014 20:56:07 -0700
Subject: [PATCH] update tests for new proto format; now they compile

---
 src/caffe/test/test_concat_layer.cpp       |  2 +-
 src/caffe/test/test_convolution_layer.cpp  | 98 +++++++++++++++++-------------
 src/caffe/test/test_data_layer.cpp         |  5 +-
 src/caffe/test/test_hdf5data_layer.cpp     | 19 +++---
 src/caffe/test/test_im2col_layer.cpp       | 30 ++++++---
 src/caffe/test/test_innerproduct_layer.cpp | 48 +++++++------
 src/caffe/test/test_lrn_layer.cpp          |  7 ++-
 src/caffe/test/test_neuron_layer.cpp       |  8 +--
 src/caffe/test/test_pooling_layer.cpp      | 40 ++++++------
 src/caffe/test/test_stochastic_pooling.cpp | 29 ++++-----
 10 files changed, 165 insertions(+), 121 deletions(-)

diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp
index 8ce7ce1..72e3c90 100644
--- a/src/caffe/test/test_concat_layer.cpp
+++ b/src/caffe/test/test_concat_layer.cpp
@@ -60,7 +60,7 @@ TYPED_TEST_CASE(ConcatLayerTest, Dtypes);
 
 TYPED_TEST(ConcatLayerTest, TestSetupNum) {
   LayerParameter layer_param;
-  layer_param.set_concat_dim(0);
+  layer_param.mutable_concat_param()->set_concat_dim(0);
   ConcatLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(),
diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp
index db23680..c8d7908 100644
--- a/src/caffe/test/test_convolution_layer.cpp
+++ b/src/caffe/test/test_convolution_layer.cpp
@@ -46,9 +46,11 @@ TYPED_TEST_CASE(ConvolutionLayerTest, Dtypes);
 
 TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(4);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(4);
   shared_ptr > layer(
       new ConvolutionLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -57,8 +59,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) {
   EXPECT_EQ(this->blob_top_->height(), 2);
   EXPECT_EQ(this->blob_top_->width(), 2);
   // setting group should not change the shape
-  layer_param.set_num_output(3);
-  layer_param.set_group(3);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
   layer.reset(new ConvolutionLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
@@ -74,13 +76,15 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
   ConstantFiller filler(filler_param);
   filler.Fill(this->blob_bottom_);
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(4);
-  layer_param.mutable_weight_filler()->set_type("constant");
-  layer_param.mutable_weight_filler()->set_value(1);
-  layer_param.mutable_bias_filler()->set_type("constant");
-  layer_param.mutable_bias_filler()->set_value(0.1);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(4);
+  convolution_param->mutable_weight_filler()->set_type("constant");
+  convolution_param->mutable_weight_filler()->set_value(1);
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
   shared_ptr > layer(
       new ConvolutionLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -120,14 +124,16 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
     }
   }
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(3);
-  layer_param.set_group(3);
-  layer_param.mutable_weight_filler()->set_type("constant");
-  layer_param.mutable_weight_filler()->set_value(1);
-  layer_param.mutable_bias_filler()->set_type("constant");
-  layer_param.mutable_bias_filler()->set_value(0.1);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("constant");
+  convolution_param->mutable_weight_filler()->set_value(1);
+  convolution_param->mutable_bias_filler()->set_type("constant");
+  convolution_param->mutable_bias_filler()->set_value(0.1);
   shared_ptr > layer(
       new ConvolutionLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -167,11 +173,13 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
 
 TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(2);
-  layer_param.mutable_weight_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_type("gaussian");
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(2);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
   Caffe::set_mode(Caffe::CPU);
   ConvolutionLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
@@ -181,12 +189,14 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) {
 
 TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(3);
-  layer_param.set_group(3);
-  layer_param.mutable_weight_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_type("gaussian");
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
   Caffe::set_mode(Caffe::CPU);
   ConvolutionLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
@@ -196,11 +206,13 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) {
 
 TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(2);
-  layer_param.mutable_weight_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_type("gaussian");
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(2);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
   Caffe::set_mode(Caffe::GPU);
   ConvolutionLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
@@ -210,12 +222,14 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) {
 
 TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_num_output(3);
-  layer_param.set_group(3);
-  layer_param.mutable_weight_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_type("gaussian");
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
+  convolution_param->set_num_output(3);
+  convolution_param->set_group(3);
+  convolution_param->mutable_weight_filler()->set_type("gaussian");
+  convolution_param->mutable_bias_filler()->set_type("gaussian");
   Caffe::set_mode(Caffe::GPU);
   ConvolutionLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp
index 032b0eb..6b0838a 100644
--- a/src/caffe/test/test_data_layer.cpp
+++ b/src/caffe/test/test_data_layer.cpp
@@ -70,8 +70,9 @@ TYPED_TEST_CASE(DataLayerTest, Dtypes);
 
 TYPED_TEST(DataLayerTest, TestRead) {
   LayerParameter param;
-  param.set_batchsize(5);
-  param.set_source(this->filename);
+  DataParameter* data_param = param.mutable_data_param();
+  data_param->set_batch_size(5);
+  data_param->set_source(this->filename);
   DataLayer layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_data_->num(), 5);
diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp
index b03fe72..a0ed113 100644
--- a/src/caffe/test/test_hdf5data_layer.cpp
+++ b/src/caffe/test/test_hdf5data_layer.cpp
@@ -57,9 +57,10 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
   // The data file we are reading has 10 rows and 8 columns,
   // with values from 0 to 10*8 reshaped in row-major order.
   LayerParameter param;
-  int batchsize = 5;
-  param.set_batchsize(batchsize);
-  param.set_source(*(this->filename));
+  HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
+  int batch_size = 5;
+  hdf5_data_param->set_batch_size(batch_size);
+  hdf5_data_param->set_source(*(this->filename));
   int num_rows = 10;
   int num_cols = 8;
   int height = 5;
@@ -68,12 +69,12 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
   // Test that the layer setup got the correct parameters.
   HDF5DataLayer layer(param);
   layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
-  EXPECT_EQ(this->blob_top_data_->num(), batchsize);
+  EXPECT_EQ(this->blob_top_data_->num(), batch_size);
   EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
   EXPECT_EQ(this->blob_top_data_->height(), height);
   EXPECT_EQ(this->blob_top_data_->width(), width);
 
-  EXPECT_EQ(this->blob_top_label_->num(), batchsize);
+  EXPECT_EQ(this->blob_top_label_->num(), batch_size);
   EXPECT_EQ(this->blob_top_label_->channels(), 1);
   EXPECT_EQ(this->blob_top_label_->height(), 1);
   EXPECT_EQ(this->blob_top_label_->width(), 1);
@@ -94,20 +95,20 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
 
     // On even iterations, we're reading the first half of the data.
    // On odd iterations, we're reading the second half of the data.
-    int label_offset = (iter % 2 == 0) ? 0 : batchsize;
-    int data_offset = (iter % 2 == 0) ? 0 : batchsize * data_size;
+    int label_offset = (iter % 2 == 0) ? 0 : batch_size;
+    int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
 
     // Every two iterations we are reading the second file,
     // which has the same labels, but data is offset by total data size,
     // which is 2000 (see generate_sample_data).
     int file_offset = (iter % 4 < 2) ? 0 : 2000;
 
-    for (int i = 0; i < batchsize; ++i) {
+    for (int i = 0; i < batch_size; ++i) {
       EXPECT_EQ(
         label_offset + i,
         this->blob_top_label_->cpu_data()[i]);
     }
-    for (int i = 0; i < batchsize; ++i) {
+    for (int i = 0; i < batch_size; ++i) {
       for (int j = 0; j < num_cols; ++j) {
         for (int h = 0; h < height; ++h) {
           for (int w = 0; w < width; ++w) {
diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp
index 31a0115..7f677ca 100644
--- a/src/caffe/test/test_im2col_layer.cpp
+++ b/src/caffe/test/test_im2col_layer.cpp
@@ -42,8 +42,10 @@ TYPED_TEST_CASE(Im2colLayerTest, Dtypes);
 
 TYPED_TEST(Im2colLayerTest, TestSetup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
   Im2colLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), 2);
@@ -54,8 +56,10 @@ TYPED_TEST(Im2colLayerTest, TestSetup) {
 
 TYPED_TEST(Im2colLayerTest, TestCPU) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
   Im2colLayer layer(layer_param);
   Caffe::set_mode(Caffe::CPU);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -69,8 +73,10 @@ TYPED_TEST(Im2colLayerTest, TestCPU) {
 
 TYPED_TEST(Im2colLayerTest, TestGPU) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
   Im2colLayer layer(layer_param);
   Caffe::set_mode(Caffe::GPU);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -84,8 +90,10 @@ TYPED_TEST(Im2colLayerTest, TestGPU) {
 
 TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
   Caffe::set_mode(Caffe::CPU);
   Im2colLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2);
@@ -95,8 +103,10 @@ TYPED_TEST(Im2colLayerTest, TestCPUGradient) {
 
 TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  ConvolutionParameter* convolution_param =
+      layer_param.mutable_convolution_param();
+  convolution_param->set_kernel_size(3);
+  convolution_param->set_stride(2);
   Caffe::set_mode(Caffe::GPU);
   Im2colLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2);
diff --git a/src/caffe/test/test_innerproduct_layer.cpp b/src/caffe/test/test_innerproduct_layer.cpp
index 5b18317..91917df 100644
--- a/src/caffe/test/test_innerproduct_layer.cpp
+++ b/src/caffe/test/test_innerproduct_layer.cpp
@@ -42,7 +42,9 @@ TYPED_TEST_CASE(InnerProductLayerTest, Dtypes);
 
 TYPED_TEST(InnerProductLayerTest, TestSetUp) {
   LayerParameter layer_param;
-  layer_param.set_num_output(10);
+  InnerProductParameter* inner_product_param =
+      layer_param.mutable_inner_product_param();
+  inner_product_param->set_num_output(10);
   shared_ptr > layer(
       new InnerProductLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -54,12 +56,14 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) {
 
 TYPED_TEST(InnerProductLayerTest, TestCPU) {
   LayerParameter layer_param;
+  InnerProductParameter* inner_product_param =
+      layer_param.mutable_inner_product_param();
   Caffe::set_mode(Caffe::CPU);
-  layer_param.set_num_output(10);
-  layer_param.mutable_weight_filler()->set_type("uniform");
-  layer_param.mutable_bias_filler()->set_type("uniform");
-  layer_param.mutable_bias_filler()->set_min(1);
-  layer_param.mutable_bias_filler()->set_max(2);
+  inner_product_param->set_num_output(10);
+  inner_product_param->mutable_weight_filler()->set_type("uniform");
+  inner_product_param->mutable_bias_filler()->set_type("uniform");
+  inner_product_param->mutable_bias_filler()->set_min(1);
+  inner_product_param->mutable_bias_filler()->set_max(2);
   shared_ptr > layer(
       new InnerProductLayer(layer_param));
   layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -74,12 +78,14 @@ TYPED_TEST(InnerProductLayerTest, TestCPU) {
 TYPED_TEST(InnerProductLayerTest, TestGPU) {
   if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
     LayerParameter layer_param;
+    InnerProductParameter* inner_product_param =
+        layer_param.mutable_inner_product_param();
     Caffe::set_mode(Caffe::GPU);
-    layer_param.set_num_output(10);
-    layer_param.mutable_weight_filler()->set_type("uniform");
-    layer_param.mutable_bias_filler()->set_type("uniform");
-    layer_param.mutable_bias_filler()->set_min(1);
-    layer_param.mutable_bias_filler()->set_max(2);
+    inner_product_param->set_num_output(10);
+    inner_product_param->mutable_weight_filler()->set_type("uniform");
+    inner_product_param->mutable_bias_filler()->set_type("uniform");
+    inner_product_param->mutable_bias_filler()->set_min(1);
+    inner_product_param->mutable_bias_filler()->set_max(2);
     shared_ptr > layer(
         new InnerProductLayer(layer_param));
     layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -96,12 +102,14 @@ TYPED_TEST(InnerProductLayerTest, TestGPU) {
 
 TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
   LayerParameter layer_param;
+  InnerProductParameter* inner_product_param =
+      layer_param.mutable_inner_product_param();
   Caffe::set_mode(Caffe::CPU);
-  layer_param.set_num_output(10);
-  layer_param.mutable_weight_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_type("gaussian");
-  layer_param.mutable_bias_filler()->set_min(1);
-  layer_param.mutable_bias_filler()->set_max(2);
+  inner_product_param->set_num_output(10);
+  inner_product_param->mutable_weight_filler()->set_type("gaussian");
+  inner_product_param->mutable_bias_filler()->set_type("gaussian");
+  inner_product_param->mutable_bias_filler()->set_min(1);
+  inner_product_param->mutable_bias_filler()->set_max(2);
   InnerProductLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
   checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
@@ -111,10 +119,12 @@ TYPED_TEST(InnerProductLayerTest, TestCPUGradient) {
 TYPED_TEST(InnerProductLayerTest, TestGPUGradient) {
   if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) {
     LayerParameter layer_param;
+    InnerProductParameter* inner_product_param =
+        layer_param.mutable_inner_product_param();
     Caffe::set_mode(Caffe::GPU);
-    layer_param.set_num_output(10);
-    layer_param.mutable_weight_filler()->set_type("gaussian");
-    layer_param.mutable_bias_filler()->set_type("gaussian");
+    inner_product_param->set_num_output(10);
+    inner_product_param->mutable_weight_filler()->set_type("gaussian");
+    inner_product_param->mutable_bias_filler()->set_type("gaussian");
    InnerProductLayer layer(layer_param);
     GradientChecker checker(1e-2, 1e-2);
     checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp
index a96684d..6ad6d02 100644
--- a/src/caffe/test/test_lrn_layer.cpp
+++ b/src/caffe/test/test_lrn_layer.cpp
@@ -54,9 +54,10 @@ void LRNLayerTest::ReferenceLRNForward(
       blob_bottom.height(), blob_bottom.width());
   const Dtype* bottom_data = blob_bottom.cpu_data();
   Dtype* top_data = blob_top->mutable_cpu_data();
-  Dtype alpha = layer_param.alpha();
-  Dtype beta = layer_param.beta();
-  int size = layer_param.local_size();
+  LRNParameter lrn_param = layer_param.lrn_param();
+  Dtype alpha = lrn_param.alpha();
+  Dtype beta = lrn_param.beta();
+  int size = lrn_param.local_size();
   for (int n = 0; n < blob_bottom.num(); ++n) {
     for (int c = 0; c < blob_bottom.channels(); ++c) {
       for (int h = 0; h < blob_bottom.height(); ++h) {
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 105f321..cd73375 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -158,7 +158,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
   // Now, check values
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
   const TypeParam* top_data = this->blob_top_->cpu_data();
-  float scale = 1. / (1. - layer_param.dropout_ratio());
+  float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
       EXPECT_EQ(top_data[i], bottom_data[i] * scale);
@@ -187,7 +187,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
   // Now, check values
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
   const TypeParam* top_data = this->blob_top_->cpu_data();
-  float scale = 1. / (1. - layer_param.dropout_ratio());
+  float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
       EXPECT_EQ(top_data[i], bottom_data[i]);
@@ -206,7 +206,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGPU) {
   // Now, check values
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
   const TypeParam* top_data = this->blob_top_->cpu_data();
-  float scale = 1. / (1. - layer_param.dropout_ratio());
+  float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
       EXPECT_EQ(top_data[i], bottom_data[i] * scale);
@@ -241,7 +241,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
   // Now, check values
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
   const TypeParam* top_data = this->blob_top_->cpu_data();
-  float scale = 1. / (1. - layer_param.dropout_ratio());
+  float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     if (top_data[i] != 0) {
       EXPECT_EQ(top_data[i], bottom_data[i]);
diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp
index 11b9ce2..d1246a0 100644
--- a/src/caffe/test/test_pooling_layer.cpp
+++ b/src/caffe/test/test_pooling_layer.cpp
@@ -45,8 +45,9 @@ TYPED_TEST_CASE(PoolingLayerTest, Dtypes);
 
 TYPED_TEST(PoolingLayerTest, TestSetup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
@@ -58,9 +59,10 @@ TYPED_TEST(PoolingLayerTest, TestSetup) {
 /*
 TYPED_TEST(PoolingLayerTest, PrintGPUBackward) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_pool(LayerParameter_PoolMethod_MAX);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
   Caffe::set_mode(Caffe::GPU);
   PoolingLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -84,9 +86,10 @@ TYPED_TEST(PoolingLayerTest, PrintGPUBackward) {
 
 TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_pool(LayerParameter_PoolMethod_MAX);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
   Caffe::set_mode(Caffe::CPU);
   PoolingLayer layer(layer_param);
   GradientChecker checker(1e-4, 1e-2);
@@ -96,9 +99,10 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
 
 TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_pool(LayerParameter_PoolMethod_MAX);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
   Caffe::set_mode(Caffe::GPU);
   PoolingLayer layer(layer_param);
   GradientChecker checker(1e-4, 1e-2);
@@ -109,9 +113,10 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) {
 
 TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_pool(LayerParameter_PoolMethod_AVE);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
   Caffe::set_mode(Caffe::CPU);
   PoolingLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2);
@@ -122,9 +127,10 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) {
 
 TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-  layer_param.set_pool(LayerParameter_PoolMethod_AVE);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
   Caffe::set_mode(Caffe::GPU);
   PoolingLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2);
diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp
index daf2c36..0ad8123 100644
--- a/src/caffe/test/test_stochastic_pooling.cpp
+++ b/src/caffe/test/test_stochastic_pooling.cpp
@@ -54,8 +54,9 @@ TYPED_TEST_CASE(StochasticPoolingLayerTest, Dtypes);
 
 TYPED_TEST(StochasticPoolingLayerTest, TestSetup) {
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
   PoolingLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
@@ -68,10 +69,10 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) {
   Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TRAIN);
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-
-  layer_param.set_pool(LayerParameter_PoolMethod_STOCHASTIC);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
   PoolingLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -112,10 +113,10 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) {
   Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TEST);
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-
-  layer_param.set_pool(LayerParameter_PoolMethod_STOCHASTIC);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
   PoolingLayer layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
@@ -150,10 +151,10 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) {
   Caffe::set_mode(Caffe::GPU);
   Caffe::set_phase(Caffe::TRAIN);
   LayerParameter layer_param;
-  layer_param.set_kernelsize(3);
-  layer_param.set_stride(2);
-
-  layer_param.set_pool(LayerParameter_PoolMethod_STOCHASTIC);
+  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
+  pooling_param->set_kernel_size(3);
+  pooling_param->set_stride(2);
+  pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
   PoolingLayer layer(layer_param);
   GradientChecker checker(1e-4, 1e-2);
   // it is too expensive to call curand multiple times, so we don't do an
-- 
2.7.4
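
For reference, every hunk above applies the same mechanical migration: options that used to be flat fields on LayerParameter (set_kernelsize, set_batchsize, set_pool, ...) move into nested, per-layer-type messages (ConvolutionParameter, PoolingParameter, DataParameter, ...) reached through the mutable_*_param() accessors, and a few fields are renamed along the way (kernelsize -> kernel_size, batchsize -> batch_size). A minimal sketch of the pattern, assuming the post-refactor caffe.proto and its generated header are available; the include path and helper function name here are illustrative, not taken from the patch:

#include "caffe/proto/caffe.pb.h"  // assumed location of the generated proto header

// Illustrative helper: configure a convolution layer in the new proto format.
void ConfigureConvolution(caffe::LayerParameter* layer_param) {
  // Old format (removed by this refactor):
  //   layer_param->set_kernelsize(3);
  //   layer_param->set_stride(2);
  //   layer_param->set_num_output(4);
  // New format: fetch the nested ConvolutionParameter and set fields there.
  caffe::ConvolutionParameter* conv_param =
      layer_param->mutable_convolution_param();
  conv_param->set_kernel_size(3);  // note the rename: kernelsize -> kernel_size
  conv_param->set_stride(2);
  conv_param->set_num_output(4);
  conv_param->mutable_bias_filler()->set_type("constant");
  conv_param->mutable_bias_filler()->set_value(0.1);
}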