From aefa2f3e643ab50fe4d8238ccdeb980143b6b454 Mon Sep 17 00:00:00 2001
From: Ilqar Ramazanli
Date: Sat, 28 Aug 2021 15:54:53 -0700
Subject: [PATCH] To add RMSProp algorithm documentation (#63721)

Summary:
It has been discussed before that adding descriptions of the optimization
algorithms to the PyTorch core documentation may result in a nice
optimization research tutorial. The tracking issue
https://github.com/pytorch/pytorch/issues/63236 lists all the necessary
algorithms together with links to the originally published papers.

In this PR we add a description of RMSProp to the documentation. For more
details we refer to the lecture notes on RMSProp:
https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf

Pull Request resolved: https://github.com/pytorch/pytorch/pull/63721

Reviewed By: albanD

Differential Revision: D30612426

Pulled By: iramazanli

fbshipit-source-id: c3ac630a9658d1282866b53c86023ac10cf95398
---
 torch/optim/rmsprop.py | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)

diff --git a/torch/optim/rmsprop.py b/torch/optim/rmsprop.py
index 4aab0b3..dc72181 100644
--- a/torch/optim/rmsprop.py
+++ b/torch/optim/rmsprop.py
@@ -6,15 +6,44 @@ from .optimizer import Optimizer
 class RMSprop(Optimizer):
     r"""Implements RMSprop algorithm.
 
-    Proposed by G. Hinton in his
-    `course `_.
-
-    The centered version first appears in `Generating Sequences
+    .. math::
+       \begin{aligned}
+            &\rule{110mm}{0.4pt}                                                        \\
+            &\textbf{input}      : \alpha \text{ (alpha)}, \: \gamma \text{ (lr)},
+                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}          \\
+            &\hspace{13mm} \lambda \text{ (weight decay)}, \: \mu \text{ (momentum)}, \: centered, \: \epsilon \text{ (epsilon)} \\
+            &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
+                \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0   \\[-1.ex]
+            &\rule{110mm}{0.4pt}                                                        \\
+            &\textbf{for} \: t = 1 \: \textbf{to} \: \ldots \: \textbf{do}              \\
+            &\hspace{5mm} g_t \leftarrow \nabla_{\theta} f_t(\theta_{t-1})              \\
+            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                 \\
+            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                    \\
+            &\hspace{5mm} v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t
+                \hspace{8mm}                                                            \\
+            &\hspace{5mm} \tilde{v}_t \leftarrow v_t                                    \\
+            &\hspace{5mm} \textbf{if} \: centered                                       \\
+            &\hspace{10mm} g^{ave}_t \leftarrow \alpha g^{ave}_{t-1} + (1 - \alpha) g_t \\
+            &\hspace{10mm} \tilde{v}_t \leftarrow \tilde{v}_t - \big(g^{ave}_t\big)^2   \\
+            &\hspace{5mm} \textbf{if} \: \mu > 0                                        \\
+            &\hspace{10mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} +
+                g_t / \big(\sqrt{\tilde{v}_t} + \epsilon\big)                           \\
+            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t       \\
+            &\hspace{5mm} \textbf{else}                                                 \\
+            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} -
+                \gamma g_t / \big(\sqrt{\tilde{v}_t} + \epsilon\big) \hspace{3mm}       \\
+            &\rule{110mm}{0.4pt}                                                 \\[-1.ex]
+            &\textbf{return} \: \theta_t                                         \\[-1.ex]
+            &\rule{110mm}{0.4pt}                                                 \\[-1.ex]
+       \end{aligned}
+
+    For further details regarding the algorithm we refer to the
+    `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton;
+    the centered version first appears in `Generating Sequences
     With Recurrent Neural Networks `_.
-
     The implementation here takes the square root of the gradient average before
     adding epsilon (note that TensorFlow interchanges these two operations). The effective
-    learning rate is thus :math:`\alpha/(\sqrt{v} + \epsilon)` where :math:`\alpha`
+    learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
     is the scheduled learning rate and :math:`v` is the weighted moving average
     of the squared gradient.
--
2.7.4
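
As a plain-Python illustration of the update rule documented above, the sketch
below walks through one RMSProp step with weight decay, the centered variant,
and momentum. It is only a minimal NumPy sketch of the pseudocode, not the
actual torch.optim.RMSprop implementation; the function name rmsprop_step, the
state-dictionary layout, and the toy objective are invented for this example.

# Minimal NumPy sketch of the RMSProp update documented in the patch above.
# Illustrative only -- not the torch.optim.RMSprop implementation.
import numpy as np

def rmsprop_step(theta, grad, state, lr=1e-2, alpha=0.99, eps=1e-8,
                 weight_decay=0.0, momentum=0.0, centered=False):
    """Apply one RMSProp update to `theta` in place; `state` holds the buffers."""
    v = state.setdefault("square_avg", np.zeros_like(theta))
    g_ave = state.setdefault("grad_avg", np.zeros_like(theta))
    buf = state.setdefault("momentum_buffer", np.zeros_like(theta))

    g = grad
    if weight_decay != 0:                        # g_t <- g_t + lambda * theta_{t-1}
        g = g + weight_decay * theta

    v[...] = alpha * v + (1 - alpha) * g * g     # v_t <- alpha*v_{t-1} + (1-alpha)*g_t^2
    v_tilde = v
    if centered:                                 # subtract the squared mean gradient
        g_ave[...] = alpha * g_ave + (1 - alpha) * g
        v_tilde = v - g_ave ** 2

    # square root is taken before adding eps, as the docstring notes
    denom = np.sqrt(v_tilde) + eps
    if momentum > 0:
        buf[...] = momentum * buf + g / denom    # b_t <- mu*b_{t-1} + g_t/denom
        theta -= lr * buf                        # theta_t <- theta_{t-1} - lr*b_t
    else:
        theta -= lr * g / denom                  # theta_t <- theta_{t-1} - lr*g_t/denom
    return theta

# Toy usage: a few steps on f(theta) = 0.5 * ||theta||^2, whose gradient is theta.
theta = np.array([1.0, -2.0])
state = {}
for _ in range(5):
    theta = rmsprop_step(theta, grad=theta.copy(), state=state, lr=0.1, centered=True)
print(theta)

Keeping the square average, gradient average, and momentum buffer in a per-parameter
state dictionary mirrors how the pseudocode carries v_t, g^{ave}_t, and b_t from one
iteration to the next.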