From 39ce801d1f5cd0e73d6d3e62986f1ccbee6f6942 Mon Sep 17 00:00:00 2001
From: Ilqar Ramazanli
Date: Thu, 9 Sep 2021 06:39:14 -0700
Subject: [PATCH] To add Adamax algorithm to documentation (#63903)

Summary:
It has been discussed before that adding descriptions of optimization algorithms to the PyTorch core documentation may result in a nice optimization research tutorial. In the tracking issue https://github.com/pytorch/pytorch/issues/63236 we list all the necessary algorithms and link to the originally published papers.

In this PR we add a description of the Adamax algorithm to the documentation. For more details, we refer to the paper https://arxiv.org/abs/1412.6980

Pull Request resolved: https://github.com/pytorch/pytorch/pull/63903

Reviewed By: albanD

Differential Revision: D30819055

Pulled By: iramazanli

fbshipit-source-id: 37f748cbea9f93bf37193ee30fc295fb1a1e9ffd
---
 torch/optim/adamax.py | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/torch/optim/adamax.py b/torch/optim/adamax.py
index 4cb71c6..503c22b 100644
--- a/torch/optim/adamax.py
+++ b/torch/optim/adamax.py
@@ -4,9 +4,31 @@ from .optimizer import Optimizer
 
 
 class Adamax(Optimizer):
-    """Implements Adamax algorithm (a variant of Adam based on infinity norm).
-
-    It has been proposed in `Adam: A Method for Stochastic Optimization`__.
+    r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
+
+    .. math::
+       \begin{aligned}
+            &\rule{110mm}{0.4pt}                                                                 \\
+            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
+                \text{ (betas)}, \theta_0 \text{ (params)}, f(\theta) \text{ (objective)},
+                \: \lambda \text{ (weight decay)},                                               \\
+            &\hspace{13mm} \epsilon \text{ (epsilon)}                                            \\
+            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)},
+                u_0 \leftarrow 0 \text{ (infinity norm)}                                  \\[-1.ex]
+            &\rule{110mm}{0.4pt}                                                                 \\
+            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
+            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1})                       \\
+            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
+            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
+            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t                      \\
+            &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}| + \epsilon)        \\
+            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
+            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
+            &\textbf{return} \: \theta_t                                                  \\[-1.ex]
+            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
+       \end{aligned}
+
+    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
 
     Args:
         params (iterable): iterable of parameters to optimize or dicts defining
@@ -18,7 +40,8 @@ class Adamax(Optimizer):
             numerical stability (default: 1e-8)
         weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
 
-    __ https://arxiv.org/abs/1412.6980
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
     """
 
     def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
--
2.7.4
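
The pseudocode added in this patch maps onto a few lines of tensor code. The snippet below is a minimal, illustrative sketch of one Adamax step written against the public torch API; it is not the torch.optim.Adamax implementation itself, and the helper name adamax_step together with the toy quadratic objective are assumptions made for this example.

    # Minimal sketch of the Adamax step from the pseudocode above.
    # NOTE: illustrative only, not the torch.optim.Adamax implementation;
    # adamax_step and the toy objective below are assumed for the example.
    import torch

    def adamax_step(param, grad, m, u, step, lr=2e-3, betas=(0.9, 0.999),
                    eps=1e-8, weight_decay=0.0):
        beta1, beta2 = betas
        if weight_decay != 0:
            grad = grad + weight_decay * param              # g_t <- g_t + lambda * theta_{t-1}
        m = beta1 * m + (1 - beta1) * grad                  # m_t: first moment estimate
        u = torch.maximum(beta2 * u, grad.abs() + eps)      # u_t: exponentially weighted infinity norm
        param = param - lr * m / ((1 - beta1 ** step) * u)  # theta_t update with bias correction
        return param, m, u

    # Toy usage: minimize f(theta) = theta^2 for a single tensor.
    theta = torch.tensor([1.0])
    m, u = torch.zeros_like(theta), torch.zeros_like(theta)
    for t in range(1, 101):
        grad = 2 * theta                                    # analytic gradient of theta^2
        theta, m, u = adamax_step(theta, grad, m, u, t)

In everyday training code one would instead construct the optimizer directly, e.g. torch.optim.Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0), and let step() apply the same update to every parameter.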