From c5db25b2033fbe44712d5a709e5af7fbabc4308d Mon Sep 17 00:00:00 2001
From: Sergey Karayev
Date: Mon, 19 May 2014 11:11:37 -0700
Subject: [PATCH] Incorporated Evan’s comments for neuron layers
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

---
 include/caffe/neuron_layers.hpp | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp
index e5108416..fb2347da 100644
--- a/include/caffe/neuron_layers.hpp
+++ b/include/caffe/neuron_layers.hpp
@@ -124,11 +124,13 @@ class PowerLayer : public NeuronLayer<Dtype> {
 };
 
 /* ReLULayer
-  Rectified Linear Unit non-linearity: fast and stable.
+  Rectified Linear Unit non-linearity.
+  The simple max is fast to compute, and the function does not saturate.
 
   y = max(0, x).
 
-  y' = x > 0
+  y' = 0 if x < 0
+  y' = 1 if x > 0
 */
 template <typename Dtype>
 class ReLULayer : public NeuronLayer<Dtype> {
@@ -149,10 +151,14 @@ class ReLULayer : public NeuronLayer<Dtype> {
 };
 
 /* SigmoidLayer
-  Sigmoid function non-linearity: a classic.
+  Sigmoid function non-linearity, a classic choice in neural networks.
+  Note that the gradient vanishes as the values move away from 0.
+  The ReLULayer is often a better choice for this reason.
 
   y = 1. / (1 + exp(-x))
 
+  y' = exp(x) / (1 + exp(x))^2
+  or y' = y * (1 - y)
 */
 template <typename Dtype>
 class SigmoidLayer : public NeuronLayer<Dtype> {
@@ -173,11 +179,11 @@ class SigmoidLayer : public NeuronLayer<Dtype> {
 };
 
 /* TanHLayer
-  Hyperbolic tangent non-linearity.
+  Hyperbolic tangent non-linearity, popular in auto-encoders.
 
   y = 1. * (exp(2x) - 1) / (exp(2x) + 1)
 
-  y' = 1 - [(exp(2x) - 1) / (exp(2x) + 1)] ^ 2
+  y' = 1 - ( (exp(2x) - 1) / (exp(2x) + 1) ) ^ 2
 */
 template <typename Dtype>
 class TanHLayer : public NeuronLayer<Dtype> {
-- 
2.34.1
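
A note not part of the patch itself: the comment blocks above state each derivative in prose, and a quick numerical check confirms they are consistent with the forward formulas. The following is a minimal, self-contained C++ sketch; the helper names (relu, sigmoid_grad, numeric_grad, and so on) are invented for illustration and are not Caffe's API, whose actual forward/backward implementations live in the layer .cpp/.cu source files.

// Standalone check of the derivative formulas quoted in the patch comments.
// Illustrative only; this does not use Caffe's Layer classes.
#include <algorithm>
#include <cmath>
#include <cstdio>

// Forward functions, written exactly as the comments give them.
double relu(double x)    { return std::max(0.0, x); }
double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }
double tanh_fn(double x) {
  return (std::exp(2 * x) - 1) / (std::exp(2 * x) + 1);
}

// Derivatives, as stated in the comments.
double relu_grad(double x) { return x > 0 ? 1.0 : 0.0; }  // undefined at x == 0
double sigmoid_grad(double x) {
  double y = sigmoid(x);
  return y * (1.0 - y);  // equivalently exp(x) / (1 + exp(x))^2
}
double tanh_grad(double x) {
  double y = tanh_fn(x);
  return 1.0 - y * y;    // 1 - ((exp(2x) - 1) / (exp(2x) + 1))^2
}

// Central finite difference: (f(x+h) - f(x-h)) / (2h).
template <typename F>
double numeric_grad(F f, double x, double h = 1e-5) {
  return (f(x + h) - f(x - h)) / (2 * h);
}

int main() {
  const double xs[] = {-2.0, -0.5, 0.7, 3.0};  // avoid x == 0 for ReLU
  for (double x : xs) {
    std::printf(
        "x=%+.2f  relu: %.6f vs %.6f  sigmoid: %.6f vs %.6f  tanh: %.6f vs %.6f\n",
        x,
        relu_grad(x),    numeric_grad(relu, x),
        sigmoid_grad(x), numeric_grad(sigmoid, x),
        tanh_grad(x),    numeric_grad(tanh_fn, x));
  }
  return 0;
}

Each analytic gradient should agree with its central difference to several decimal places. The output also illustrates the vanishing-gradient remark added to the SigmoidLayer comment: y * (1 - y) peaks at 0.25 at x = 0 and decays toward zero as |x| grows, whereas the ReLU gradient stays at 1 for all positive inputs.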