--- /dev/null
+// Copyright 2014 BVLC and contributors.
+
+#include <vector>
+
+#include "cuda_runtime.h"
+#include "gtest/gtest.h"
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+// Fixture for EltwiseProductLayer: three equally shaped bottom blobs
+// (2 x 3 x 4 x 5) filled with uniform random values, and one top blob
+// that the layer reshapes during SetUp.
+template <typename Dtype>
+class EltwiseProductLayerTest : public ::testing::Test {
+ protected:
+  EltwiseProductLayerTest()
+      : blob_bottom_a_(new Blob<Dtype>(2, 3, 4, 5)),
+        blob_bottom_b_(new Blob<Dtype>(2, 3, 4, 5)),
+        blob_bottom_c_(new Blob<Dtype>(2, 3, 4, 5)),
+        blob_top_(new Blob<Dtype>()) {
+    // fill the values
+    FillerParameter filler_param;
+    UniformFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_a_);
+    filler.Fill(this->blob_bottom_b_);
+    filler.Fill(this->blob_bottom_c_);
+    blob_bottom_vec_.push_back(blob_bottom_a_);
+    blob_bottom_vec_.push_back(blob_bottom_b_);
+    blob_bottom_vec_.push_back(blob_bottom_c_);
+    blob_top_vec_.push_back(blob_top_);
+  }
+  // The fixture owns all four blobs; the vectors hold non-owning copies.
+  virtual ~EltwiseProductLayerTest() {
+    delete blob_bottom_a_;
+    delete blob_bottom_b_;
+    delete blob_bottom_c_;
+    delete blob_top_;
+  }
+  Blob<Dtype>* const blob_bottom_a_;
+  Blob<Dtype>* const blob_bottom_b_;
+  Blob<Dtype>* const blob_bottom_c_;
+  Blob<Dtype>* const blob_top_;
+  vector<Blob<Dtype>*> blob_bottom_vec_;
+  vector<Blob<Dtype>*> blob_top_vec_;
+};
+
+typedef ::testing::Types<float, double> Dtypes;
+TYPED_TEST_CASE(EltwiseProductLayerTest, Dtypes);
+
+// SetUp must give the top blob the same 2 x 3 x 4 x 5 shape as the bottoms.
+TYPED_TEST(EltwiseProductLayerTest, TestSetUp) {
+  LayerParameter layer_param;
+  shared_ptr<EltwiseProductLayer<TypeParam> > layer(
+      new EltwiseProductLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  const Blob<TypeParam>* const top = this->blob_top_;
+  EXPECT_EQ(top->num(), 2);
+  EXPECT_EQ(top->channels(), 3);
+  EXPECT_EQ(top->height(), 4);
+  EXPECT_EQ(top->width(), 5);
+}
+
+// CPU forward: each top element must equal the product of the three
+// corresponding bottom elements.
+TYPED_TEST(EltwiseProductLayerTest, TestCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  LayerParameter layer_param;
+  shared_ptr<EltwiseProductLayer<TypeParam> > layer(
+      new EltwiseProductLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const TypeParam* bottom_a = this->blob_bottom_a_->cpu_data();
+  const TypeParam* bottom_b = this->blob_bottom_b_->cpu_data();
+  const TypeParam* bottom_c = this->blob_bottom_c_->cpu_data();
+  const int count = this->blob_top_->count();
+  for (int i = 0; i < count; ++i) {
+    EXPECT_EQ(top_data[i], bottom_a[i] * bottom_b[i] * bottom_c[i]);
+  }
+}
+
+// GPU forward: results are copied back via cpu_data() and compared with
+// an element-wise product computed on the host.
+TYPED_TEST(EltwiseProductLayerTest, TestGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  LayerParameter layer_param;
+  shared_ptr<EltwiseProductLayer<TypeParam> > layer(
+      new EltwiseProductLayer<TypeParam>(layer_param));
+  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const TypeParam* bottom_a = this->blob_bottom_a_->cpu_data();
+  const TypeParam* bottom_b = this->blob_bottom_b_->cpu_data();
+  const TypeParam* bottom_c = this->blob_bottom_c_->cpu_data();
+  const int count = this->blob_top_->count();
+  for (int i = 0; i < count; ++i) {
+    EXPECT_EQ(top_data[i], bottom_a[i] * bottom_b[i] * bottom_c[i]);
+  }
+}
+
+// Numerical gradient check on CPU (stepsize 1e-2, threshold 1e-3).
+TYPED_TEST(EltwiseProductLayerTest, TestCPUGradient) {
+  Caffe::set_mode(Caffe::CPU);
+  LayerParameter layer_param;
+  EltwiseProductLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// Numerical gradient check on GPU. Double-precision (sizeof == 8) runs are
+// skipped on pre-Fermi devices (compute capability < 2.0).
+TYPED_TEST(EltwiseProductLayerTest, TestGPUGradient) {
+  const bool is_float = (sizeof(TypeParam) == 4);
+  if (!is_float && CAFFE_TEST_CUDA_PROP.major < 2) {
+    LOG(ERROR) << "Skipping test due to old architecture.";
+    return;
+  }
+  Caffe::set_mode(Caffe::GPU);
+  LayerParameter layer_param;
+  EltwiseProductLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+} // namespace caffe
--- /dev/null
+// Copyright 2014 BVLC and contributors.
+
+#include <algorithm>
+#include <vector>
+
+#include "cuda_runtime.h"
+#include "gtest/gtest.h"
+
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+using std::min;
+using std::max;
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+// Fixture for LRNMapLayer: a single 2 x 2 x 7 x 7 bottom blob filled with
+// Gaussian noise under a fixed seed, plus a host-side reference
+// implementation used to validate the layer's forward pass.
+template <typename Dtype>
+class LRNMapLayerTest : public ::testing::Test {
+ protected:
+  LRNMapLayerTest()
+      : blob_bottom_(new Blob<Dtype>()),
+        blob_top_(new Blob<Dtype>()) {}
+  virtual void SetUp() {
+    // Fixed seed keeps the random fill (and thus the test) deterministic.
+    Caffe::set_random_seed(1701);
+    blob_bottom_->Reshape(2, 2, 7, 7);
+    // fill the values
+    FillerParameter filler_param;
+    GaussianFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_);
+    blob_bottom_vec_.push_back(blob_bottom_);
+    blob_top_vec_.push_back(blob_top_);
+    // Tolerance used when comparing layer output against the reference.
+    epsilon_ = 1e-5;
+  }
+  virtual ~LRNMapLayerTest() { delete blob_bottom_; delete blob_top_; }
+  // Computes the expected within-channel LRN response on the host; see the
+  // definition below for the windowing details.
+  void ReferenceLRNMapForward(const Blob<Dtype>& blob_bottom,
+      const LayerParameter& layer_param, Blob<Dtype>* blob_top);
+  Blob<Dtype>* const blob_bottom_;
+  Blob<Dtype>* const blob_top_;
+  vector<Blob<Dtype>*> blob_bottom_vec_;
+  vector<Blob<Dtype>*> blob_top_vec_;
+  Dtype epsilon_;
+};
+
+// Reference (host) implementation of within-channel local response
+// normalization: for every output position, accumulate the squared values
+// over a size x size spatial window centered at (h, w) within the same
+// channel, then divide the input by scale^beta where
+//   scale = 1 + (alpha / size^2) * sum(value^2).
+template <typename Dtype>
+void LRNMapLayerTest<Dtype>::ReferenceLRNMapForward(
+    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
+    Blob<Dtype>* blob_top) {
+  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
+      blob_bottom.height(), blob_bottom.width());
+  const Dtype* bottom_data = blob_bottom.cpu_data();
+  Dtype* top_data = blob_top->mutable_cpu_data();
+  const Dtype alpha = layer_param.lrn_map_param().alpha();
+  const Dtype beta = layer_param.lrn_map_param().beta();
+  const int size = layer_param.lrn_map_param().local_size();
+  for (int n = 0; n < blob_bottom.num(); ++n) {
+    for (int c = 0; c < blob_bottom.channels(); ++c) {
+      for (int h = 0; h < blob_bottom.height(); ++h) {
+        // Clip the vertical window to the image: clamp the end first, since
+        // h_end depends on the unclamped h_start.
+        int h_start = h - (size - 1) / 2;
+        int h_end = min(h_start + size, blob_bottom.height());
+        h_start = max(h_start, 0);
+        for (int w = 0; w < blob_bottom.width(); ++w) {
+          Dtype scale = 1.;
+          // Same clipping for the horizontal window.
+          int w_start = w - (size - 1) / 2;
+          int w_end = min(w_start + size, blob_bottom.width());
+          w_start = max(w_start, 0);
+          for (int nh = h_start; nh < h_end; ++nh) {
+            for (int nw = w_start; nw < w_end; ++nw) {
+              Dtype value = blob_bottom.data_at(n, c, nh, nw);
+              // NOTE(review): the normalizer divides by size^2 even for
+              // border windows that cover fewer than size^2 pixels — this
+              // appears intentional (matches the layer), but confirm.
+              scale += value * value * alpha / (size * size);
+            }
+          }
+          *(top_data + blob_top->offset(n, c, h, w)) =
+              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
+        }
+      }
+    }
+  }
+}
+
+typedef ::testing::Types<float, double> Dtypes;
+TYPED_TEST_CASE(LRNMapLayerTest, Dtypes);
+
+// LRNMap is shape-preserving: top must match the 2 x 2 x 7 x 7 bottom.
+TYPED_TEST(LRNMapLayerTest, TestSetup) {
+  LayerParameter layer_param;
+  LRNMapLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  const Blob<TypeParam>* const top = this->blob_top_;
+  EXPECT_EQ(top->num(), 2);
+  EXPECT_EQ(top->channels(), 2);
+  EXPECT_EQ(top->height(), 7);
+  EXPECT_EQ(top->width(), 7);
+}
+
+// CPU forward must agree with the host reference within epsilon_.
+TYPED_TEST(LRNMapLayerTest, TestCPUForward) {
+  LayerParameter layer_param;
+  LRNMapLayer<TypeParam> layer(layer_param);
+  Caffe::set_mode(Caffe::CPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  Blob<TypeParam> top_reference;
+  this->ReferenceLRNMapForward(*(this->blob_bottom_), layer_param,
+      &top_reference);
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const TypeParam* ref_data = top_reference.cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], ref_data[i], this->epsilon_);
+  }
+}
+
+// GPU forward must agree with the host reference within epsilon_.
+TYPED_TEST(LRNMapLayerTest, TestGPUForward) {
+  LayerParameter layer_param;
+  LRNMapLayer<TypeParam> layer(layer_param);
+  Caffe::set_mode(Caffe::GPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  Blob<TypeParam> top_reference;
+  this->ReferenceLRNMapForward(*(this->blob_bottom_), layer_param,
+      &top_reference);
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const TypeParam* ref_data = top_reference.cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_NEAR(top_data[i], ref_data[i], this->epsilon_);
+  }
+}
+
+// CPU gradient check. A manual forward/backward pass with all-ones top
+// diffs is run first as a smoke test before the exhaustive checker.
+TYPED_TEST(LRNMapLayerTest, TestCPUGradient) {
+  LayerParameter layer_param;
+  LRNMapLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  Caffe::set_mode(Caffe::CPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  TypeParam* top_diff = this->blob_top_->mutable_cpu_diff();
+  const int top_count = this->blob_top_->count();
+  for (int i = 0; i < top_count; ++i) {
+    top_diff[i] = 1.;
+  }
+  layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU gradient check; mirrors the CPU variant including the manual
+// forward/backward smoke pass with all-ones top diffs.
+TYPED_TEST(LRNMapLayerTest, TestGPUGradient) {
+  LayerParameter layer_param;
+  LRNMapLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2);
+  Caffe::set_mode(Caffe::GPU);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  TypeParam* top_diff = this->blob_top_->mutable_cpu_diff();
+  const int top_count = this->blob_top_->count();
+  for (int i = 0; i < top_count; ++i) {
+    top_diff[i] = 1.;
+  }
+  layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+} // namespace caffe
--- /dev/null
+// Copyright 2014 BVLC and contributors.
+
+#include <vector>
+
+#include "cuda_runtime.h"
+#include "gtest/gtest.h"
+
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+// Fixture for PowerLayer (y = (shift + scale * x)^power): one 2 x 3 x 4 x 5
+// bottom blob filled with Gaussian noise, one top blob.
+template <typename Dtype>
+class PowerLayerTest : public ::testing::Test {
+ protected:
+  PowerLayerTest()
+      : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
+        blob_top_(new Blob<Dtype>()) {
+    // fill the values
+    FillerParameter filler_param;
+    GaussianFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_);
+    blob_bottom_vec_.push_back(blob_bottom_);
+    blob_top_vec_.push_back(blob_top_);
+  }
+  virtual ~PowerLayerTest() { delete blob_bottom_; delete blob_top_; }
+  Blob<Dtype>* const blob_bottom_;
+  Blob<Dtype>* const blob_top_;
+  vector<Blob<Dtype>*> blob_bottom_vec_;
+  vector<Blob<Dtype>*> blob_top_vec_;
+};
+
+typedef ::testing::Types<float, double> Dtypes;
+TYPED_TEST_CASE(PowerLayerTest, Dtypes);
+
+// CPU forward with a fractional exponent: compares against pow() computed
+// on the host. Negative bases yield NaN, which the layer is expected to
+// reproduce.
+TYPED_TEST(PowerLayerTest, TestPowerCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  LayerParameter layer_param;
+  TypeParam power = 0.37;
+  TypeParam scale = 0.83;
+  TypeParam shift = -2.4;
+  layer_param.mutable_power_param()->set_power(power);
+  layer_param.mutable_power_param()->set_scale(scale);
+  layer_param.mutable_power_param()->set_shift(shift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    TypeParam expected_value = pow(shift + scale * bottom_data[i], power);
+    // NOTE(review): unqualified isnan relies on a C-style macro or ADL;
+    // std::isnan with <cmath> would be more portable — confirm build setup.
+    if (isnan(expected_value)) {
+      EXPECT_TRUE(isnan(top_data[i]));
+    } else {
+      // Relative tolerance of 0.01%; abs() by hand since precision may be
+      // negative when expected_value is. (If expected_value == 0 the
+      // tolerance collapses to exact equality.)
+      TypeParam precision = expected_value * 0.0001;
+      precision *= (precision < 0) ? -1 : 1;
+      EXPECT_NEAR(expected_value, top_data[i], precision);
+    }
+  }
+}
+
+// CPU gradient check with a fractional exponent. Bottom values that would
+// make the base (shift + scale * x) negative are reflected above the
+// boundary so the forward pass stays NaN-free.
+TYPED_TEST(PowerLayerTest, TestPowerGradientCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 0.37;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  const TypeParam lower_bound = -kShift / kScale;
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    if (bottom_data[i] < lower_bound) {
+      bottom_data[i] = lower_bound + (lower_bound - bottom_data[i]);
+    }
+  }
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// CPU gradient check with shift == 0: negative inputs are mirrored to
+// their absolute value, since x < 0 would make x^0.37 NaN.
+TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 0.37;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = 0.0;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    bottom_data[i] *= (bottom_data[i] < 0) ? -1 : 1;
+  }
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// With power == 0, y = (shift + scale * x)^0 == 1 for every input.
+TYPED_TEST(PowerLayerTest, TestPowerZeroCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  LayerParameter layer_param;
+  TypeParam power = 0.0;
+  TypeParam scale = 0.83;
+  TypeParam shift = -2.4;
+  layer_param.mutable_power_param()->set_power(power);
+  layer_param.mutable_power_param()->set_scale(scale);
+  layer_param.mutable_power_param()->set_shift(shift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values: every output must be exactly 1, independent of the
+  // input. (Removed the unused bottom_data local, which was dead code and
+  // triggered -Wunused-variable.)
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  TypeParam expected_value = TypeParam(1);
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_EQ(expected_value, top_data[i]);
+  }
+}
+
+// Gradient check with power == 0: the output is constant, so the gradient
+// with respect to the input should be zero everywhere.
+TYPED_TEST(PowerLayerTest, TestPowerZeroGradientCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 0.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// With power == 1 the layer is affine: y = shift + scale * x.
+TYPED_TEST(PowerLayerTest, TestPowerOneCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 1.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Compare each output against the affine transform of its input.
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    TypeParam expected_value = kShift + kScale * bottom_data[i];
+    EXPECT_NEAR(expected_value, top_data[i], 0.001);
+  }
+}
+
+// Gradient check with power == 1 (affine layer; gradient is simply scale).
+TYPED_TEST(PowerLayerTest, TestPowerOneGradientCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 1.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// CPU forward with power == 2: y = (shift + scale * x)^2.
+TYPED_TEST(PowerLayerTest, TestPowerTwoCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.34;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Compare each output against the squared affine transform of its input.
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    TypeParam expected_value = pow(kShift + kScale * bottom_data[i], 2);
+    EXPECT_NEAR(expected_value, top_data[i], 0.001);
+  }
+}
+
+// Gradient check with power == 2 (smooth everywhere, no NaN handling
+// needed).
+TYPED_TEST(PowerLayerTest, TestPowerTwoGradientCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// Gradient check with power == 2 and scale == 0.5 (exercises a scale that
+// is exactly representable in binary floating point).
+TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientCPU) {
+  Caffe::set_mode(Caffe::CPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.5;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU forward with a fractional exponent; mirrors TestPowerCPU. Results
+// are copied back via cpu_data() and compared against host-side pow().
+TYPED_TEST(PowerLayerTest, TestPowerGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  LayerParameter layer_param;
+  TypeParam power = 0.37;
+  TypeParam scale = 0.83;
+  TypeParam shift = -2.4;
+  layer_param.mutable_power_param()->set_power(power);
+  layer_param.mutable_power_param()->set_scale(scale);
+  layer_param.mutable_power_param()->set_shift(shift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    TypeParam expected_value = pow(shift + scale * bottom_data[i], power);
+    // Negative bases raised to a fractional power are NaN; the GPU layer
+    // must reproduce that.
+    if (isnan(expected_value)) {
+      EXPECT_TRUE(isnan(top_data[i]));
+    } else {
+      // Relative tolerance of 0.01%, made non-negative by hand.
+      TypeParam precision = expected_value * 0.0001;
+      precision *= (precision < 0) ? -1 : 1;
+      EXPECT_NEAR(expected_value, top_data[i], precision);
+    }
+  }
+}
+
+// GPU gradient check with a fractional exponent. As in the CPU variant,
+// inputs that would make the base negative are reflected above
+// -shift/scale so the forward pass stays NaN-free.
+TYPED_TEST(PowerLayerTest, TestPowerGradientGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 0.37;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  const TypeParam lower_bound = -kShift / kScale;
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    if (bottom_data[i] < lower_bound) {
+      bottom_data[i] = lower_bound + (lower_bound - bottom_data[i]);
+    }
+  }
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU gradient check with shift == 0: negative inputs are mirrored to
+// their absolute value, since x < 0 would make x^0.37 NaN.
+TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 0.37;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = 0.0;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    bottom_data[i] *= (bottom_data[i] < 0) ? -1 : 1;
+  }
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// With power == 0, y = (shift + scale * x)^0 == 1 for every input (GPU).
+TYPED_TEST(PowerLayerTest, TestPowerZeroGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  LayerParameter layer_param;
+  TypeParam power = 0.0;
+  TypeParam scale = 0.83;
+  TypeParam shift = -2.4;
+  layer_param.mutable_power_param()->set_power(power);
+  layer_param.mutable_power_param()->set_scale(scale);
+  layer_param.mutable_power_param()->set_shift(shift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values: every output must be exactly 1, independent of the
+  // input. (Removed the unused bottom_data local, which was dead code and
+  // triggered -Wunused-variable.)
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  TypeParam expected_value = TypeParam(1);
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_EQ(expected_value, top_data[i]);
+  }
+}
+
+// GPU gradient check with power == 0: output is constant, so the gradient
+// should be zero everywhere.
+TYPED_TEST(PowerLayerTest, TestPowerZeroGradientGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 0.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU forward with power == 1 (affine): y = shift + scale * x.
+TYPED_TEST(PowerLayerTest, TestPowerOneGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 1.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Compare each output against the affine transform of its input.
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    TypeParam expected_value = kShift + kScale * bottom_data[i];
+    EXPECT_NEAR(expected_value, top_data[i], 0.001);
+  }
+}
+
+// GPU gradient check with power == 1 (affine; gradient is simply scale).
+TYPED_TEST(PowerLayerTest, TestPowerOneGradientGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 1.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU forward with power == 2: y = (shift + scale * x)^2.
+TYPED_TEST(PowerLayerTest, TestPowerTwoGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.34;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Compare each output against the squared affine transform of its input.
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  const int count = this->blob_bottom_->count();
+  for (int i = 0; i < count; ++i) {
+    TypeParam expected_value = pow(kShift + kScale * bottom_data[i], 2);
+    EXPECT_NEAR(expected_value, top_data[i], 0.001);
+  }
+}
+
+// GPU gradient check with power == 2 (smooth everywhere, no NaN handling
+// needed).
+TYPED_TEST(PowerLayerTest, TestPowerTwoGradientGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.83;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+// GPU gradient check with power == 2 and scale == 0.5 (scale exactly
+// representable in binary floating point).
+TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientGPU) {
+  Caffe::set_mode(Caffe::GPU);
+  const TypeParam kPower = 2.0;
+  const TypeParam kScale = 0.5;
+  const TypeParam kShift = -2.4;
+  LayerParameter layer_param;
+  layer_param.mutable_power_param()->set_power(kPower);
+  layer_param.mutable_power_param()->set_scale(kScale);
+  layer_param.mutable_power_param()->set_shift(kShift);
+  PowerLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-2, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
+
+} // namespace caffe