From: Sergey Karayev
Date: Mon, 19 May 2014 18:11:37 +0000 (-0700)
Subject: Incorporated Evan’s comments for neuron layers
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c5db25b2033fbe44712d5a709e5af7fbabc4308d;p=platform%2Fupstream%2Fcaffe.git

Incorporated Evan’s comments for neuron layers
---

diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp
index e510841..fb2347d 100644
--- a/include/caffe/neuron_layers.hpp
+++ b/include/caffe/neuron_layers.hpp
@@ -124,11 +124,13 @@ class PowerLayer : public NeuronLayer<Dtype> {
 };
 
 /* ReLULayer
-  Rectified Linear Unit non-linearity: fast and stable.
+  Rectified Linear Unit non-linearity.
+  The simple max is fast to compute, and the function does not saturate.
 
   y = max(0, x).
 
-  y' = x > 0
+  y' = 0 if x < 0
+  y' = 1 if x > 0
 */
 template <typename Dtype>
 class ReLULayer : public NeuronLayer<Dtype> {
@@ -149,10 +151,14 @@ class ReLULayer : public NeuronLayer<Dtype> {
 };
 
 /* SigmoidLayer
-  Sigmoid function non-linearity: a classic.
+  Sigmoid function non-linearity, a classic choice in neural networks.
+  Note that the gradient vanishes as the values move away from 0.
+  The ReLULayer is often a better choice for this reason.
 
   y = 1. / (1 + exp(-x))
 
+  y' = exp(x) / (1 + exp(x))^2
+  or
   y' = y * (1 - y)
 */
 template <typename Dtype>
@@ -173,11 +179,11 @@ class SigmoidLayer : public NeuronLayer<Dtype> {
 };
 
 /* TanHLayer
-  Hyperbolic tangent non-linearity.
+  Hyperbolic tangent non-linearity, popular in auto-encoders.
 
   y = 1. * (exp(2x) - 1) / (exp(2x) + 1)
 
-  y' = 1 - [(exp(2x) - 1) / (exp(2x) + 1)] ^ 2
+  y' = 1 - ( (exp(2x) - 1) / (exp(2x) + 1) ) ^ 2
 */
 template <typename Dtype>
 class TanHLayer : public NeuronLayer<Dtype> {
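
For reference, the element-wise formulas documented in the comments above can be sanity-checked with a small standalone C++ program. This is only an illustrative sketch, not Caffe's implementation (the real Forward_cpu/Backward_cpu definitions live in src/caffe/layers/ and operate on Blobs); the free function names below are invented for the example.

    // Sketch of the ReLU / Sigmoid / TanH formulas from the comments above.
    // Not Caffe code; illustrates only the scalar math.
    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // ReLU: y = max(0, x);  y' = 0 if x < 0, 1 if x > 0.
    float relu(float x)      { return std::max(0.0f, x); }
    float relu_grad(float x) { return x > 0 ? 1.0f : 0.0f; }

    // Sigmoid: y = 1 / (1 + exp(-x));  y' = y * (1 - y).
    float sigmoid(float x)      { return 1.0f / (1.0f + std::exp(-x)); }
    float sigmoid_grad(float x) { float y = sigmoid(x); return y * (1.0f - y); }

    // TanH: y = (exp(2x) - 1) / (exp(2x) + 1) = tanh(x);  y' = 1 - y^2.
    float tanh_fwd(float x)  { return std::tanh(x); }
    float tanh_grad(float x) { float y = std::tanh(x); return 1.0f - y * y; }

    int main() {
      for (float x = -2.0f; x <= 2.0f; x += 1.0f) {
        std::printf("x=%+.1f  relu=%.3f (d=%.3f)  sigm=%.3f (d=%.3f)  tanh=%+.3f (d=%.3f)\n",
                    x, relu(x), relu_grad(x),
                    sigmoid(x), sigmoid_grad(x),
                    tanh_fwd(x), tanh_grad(x));
      }
      return 0;
    }

Printing a few values makes the behavior described in the comments visible: the sigmoid gradient shrinks quickly away from 0, while the ReLU gradient stays at 1 for all positive inputs.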