From ffe0409398b4415071c32e2c56af535500fba761 Mon Sep 17 00:00:00 2001
From: Megvii Engine Team
Date: Wed, 29 Apr 2020 10:10:17 +0800
Subject: [PATCH] docs(mge/optimizer): refine the docstring of several apis

GitOrigin-RevId: a97fe5b68ace2983a16b747f5b4d73562feb8e9e
---
 python_module/megengine/optimizer/adam.py         | 2 +-
 python_module/megengine/optimizer/internal.py     | 2 +-
 python_module/megengine/optimizer/lr_scheduler.py | 2 +-
 python_module/megengine/optimizer/sgd.py          | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/python_module/megengine/optimizer/adam.py b/python_module/megengine/optimizer/adam.py
index 587ec2f7..6f264d3b 100644
--- a/python_module/megengine/optimizer/adam.py
+++ b/python_module/megengine/optimizer/adam.py
@@ -14,7 +14,7 @@ from .optimizer import Optimizer
 
 
 class Adam(Optimizer):
-    r"""Implements Adam algorithm.
+    r"""Implements Adam algorithm proposed in `"Adam: A Method for Stochastic Optimization" `_.
 
     :param params: iterable of parameters to optimize or dicts defining
         parameter groups.
diff --git a/python_module/megengine/optimizer/internal.py b/python_module/megengine/optimizer/internal.py
index 7e99b8f4..0483af9a 100644
--- a/python_module/megengine/optimizer/internal.py
+++ b/python_module/megengine/optimizer/internal.py
@@ -21,7 +21,7 @@ def add_update_fastpath(
     beta: Union[Tensor, float, int] = 1.0,
     bias: Union[Tensor, float, int] = 0.0
 ):
-    """a fast-path ONLY used to update parameters in optimzier, since it
+    """a fast-path ONLY used to update parameters in optimizer, since it
     would bypass computing graph and launch dnn/add_update kernel directly,
     it is more efficient than functional/add_update.
     """
diff --git a/python_module/megengine/optimizer/lr_scheduler.py b/python_module/megengine/optimizer/lr_scheduler.py
index 677607c8..7cdb6d9b 100644
--- a/python_module/megengine/optimizer/lr_scheduler.py
+++ b/python_module/megengine/optimizer/lr_scheduler.py
@@ -4,7 +4,7 @@ from .optimizer import Optimizer
 
 
 class LRScheduler(metaclass=ABCMeta):
-    r"""Base class for all lr_schedulers.
+    r"""Base class for all learning rate based schedulers.
 
     :param optimizer: Wrapped optimizer.
     :param current_epoch: The index of current epoch. Default: -1
diff --git a/python_module/megengine/optimizer/sgd.py b/python_module/megengine/optimizer/sgd.py
index 0a24c20a..a1f807b3 100644
--- a/python_module/megengine/optimizer/sgd.py
+++ b/python_module/megengine/optimizer/sgd.py
@@ -17,7 +17,7 @@ class SGD(Optimizer):
     r"""Implements stochastic gradient descent.
 
     Nesterov momentum is based on the formula from
-    `On the importance of initialization and momentum in deep learning`.
+    `"On the importance of initialization and momentum in deep learning" `_ .
 
     :param params: iterable of parameters to optimize or dicts defining
         parameter groups.
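
For reference, below is a minimal construction sketch for the optimizer APIs whose
docstrings are refined by this patch. It is illustrative only and not part of the
patch itself: the toy Linear module, the hyperparameter values, and the commented-out
MultiStepLR usage are assumptions, and exact signatures may differ across MegEngine
versions (the pre-1.0 python_module package layout is assumed).

    # Illustrative sketch only, not part of this patch. Assumes the
    # python_module-era (pre-1.0) MegEngine layout and that a
    # megengine.module.Linear layer is available.
    import megengine.module as M
    from megengine.optimizer import SGD, Adam

    net = M.Linear(4, 2)  # toy module, used only to provide parameters()

    # Adam, as described in Kingma & Ba,
    # "Adam: A Method for Stochastic Optimization".
    adam = Adam(net.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)

    # SGD with momentum; per the docstring above, the momentum update
    # follows the Nesterov formulation from Sutskever et al.,
    # "On the importance of initialization and momentum in deep learning".
    sgd = SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)

    # LRScheduler itself is abstract (metaclass=ABCMeta); a concrete
    # scheduler (MultiStepLR is assumed here) wraps an optimizer and is
    # stepped once per epoch:
    #     scheduler = MultiStepLR(sgd, milestones=[30, 60])
    #     scheduler.step()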