From: Tobias Domhan
Date: Thu, 13 Feb 2014 16:32:18 +0000 (+0100)
Subject: added sigmoid layer
X-Git-Tag: submit/tizen/20180823.020014~771^2~2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=76b8bff3b92a03367d2c29be99189f6ce512f719;p=platform%2Fupstream%2Fcaffeonacl.git

added sigmoid layer
---

diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index fd84866..82e52cd 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -46,6 +46,25 @@ class ReLULayer : public NeuronLayer<Dtype> {
 
 template <typename Dtype>
+class SigmoidLayer : public NeuronLayer<Dtype> {
+ public:
+  explicit SigmoidLayer(const LayerParameter& param)
+      : NeuronLayer<Dtype>(param) {}
+
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+
+  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+};
+
+
+template <typename Dtype>
 class BNLLLayer : public NeuronLayer<Dtype> {
  public:
   explicit BNLLLayer(const LayerParameter& param)
diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp
index f4fb5e5..b62ba38 100644
--- a/src/caffe/layer_factory.cpp
+++ b/src/caffe/layer_factory.cpp
@@ -47,6 +47,8 @@ Layer<Dtype>* GetLayer(const LayerParameter& param) {
     return new PoolingLayer<Dtype>(param);
   } else if (type == "relu") {
     return new ReLULayer<Dtype>(param);
+  } else if (type == "sigmoid") {
+    return new SigmoidLayer<Dtype>(param);
   } else if (type == "softmax") {
     return new SoftmaxLayer<Dtype>(param);
   } else if (type == "softmax_loss") {
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 1abee7c..8674519 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -89,6 +89,60 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
 }
 
 
+TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::CPU);
+  SigmoidLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
+    // check that we squashed the value between 0 and 1
+    EXPECT_GE(top_data[i], 0.);
+    EXPECT_LE(top_data[i], 1.);
+  }
+}
+
+
+TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::CPU);
+  SigmoidLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
+}
+
+TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::GPU);
+  SigmoidLayer<TypeParam> layer(layer_param);
+  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i])));
+    // check that we squashed the value between 0 and 1
+    EXPECT_GE(top_data[i], 0.);
+    EXPECT_LE(top_data[i], 1.);
+  }
+}
+
+
+TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::GPU);
+  SigmoidLayer<TypeParam> layer(layer_param);
+  GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
+  checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
+}
+
+
+
 TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
   LayerParameter layer_param;
   Caffe::set_mode(Caffe::CPU);
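Note: this excerpt declares SigmoidLayer, registers it with the layer factory, and adds CPU/GPU tests, but the hunks for the kernel files themselves (e.g. a src/caffe/layers/sigmoid_layer.cpp and a matching .cu file) are not shown above. For reference, here is a minimal sketch of a CPU implementation consistent with the declarations in vision_layers.hpp and with the values the tests check: y = 1 / (1 + exp(-x)) on the forward pass and dy/dx = y * (1 - y) on the backward pass. The file layout, the sigmoid helper, and the INSTANTIATE_CLASS call are assumptions patterned on how neighbouring layers of this era are implemented, not lines from this commit.

// Sketch only, not part of this patch: a CPU implementation matching the
// SigmoidLayer declarations above. Forward_cpu fills top from bottom;
// Backward_cpu scales top_diff by the sigmoid derivative y * (1 - y),
// reusing the forward output, and returns Dtype(0) as non-loss layers do.
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
  return 1. / (1. + exp(-x));
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = sigmoid(bottom_data[i]);
  }
}

template <typename Dtype>
Dtype SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (propagate_down) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    const int count = (*bottom)[0]->count();
    for (int i = 0; i < count; ++i) {
      // d(sigmoid)/dx = y * (1 - y), computed from the cached forward output.
      bottom_diff[i] = top_diff[i] * top_data[i] * (1. - top_data[i]);
    }
  }
  return Dtype(0);
}

INSTANTIATE_CLASS(SigmoidLayer);

}  // namespace caffe

The gradient tests in the patch then cross-check this analytic derivative against finite differences: the GradientChecker perturbs each bottom value by the 1e-2 step size and requires the numeric and computed gradients to agree within the 1e-3 threshold.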