
docs(mge): fix amp docstring problems

GitOrigin-RevId: e5540eb940
release-1.5
Megvii Engine Team 3 years ago
parent commit bfc4e7a966
4 changed files with 12 additions and 11 deletions
  1. imperative/python/megengine/amp/autocast.py (+5, -5)
  2. imperative/python/megengine/core/tensor/amp.py (+3, -3)
  3. imperative/python/megengine/quantization/qconfig.py (+1, -1)
  4. imperative/python/megengine/tensor.py (+3, -2)

imperative/python/megengine/amp/autocast.py (+5, -5)

@@ -15,14 +15,14 @@ class autocast:
     A class to control autocast mode for amp as a context manager or a decorator.
 
     :param enabled: Whether autocast mode is enabled.
-    :low_prec_dtype: Set amp autocast mode's lower precision dtype. It will change the
-        target dtype in tensor casting for better speed and memory. Default: float16.
-    :high_prec_dtype: Set amp autocast mode's higher precision dtype. It will change the
-        target dtype in tensor casting for better precision. Default: float32.
+    :param low_prec_dtype: Set amp autocast mode's lower precision dtype. It will change
+        the target dtype in tensor casting for better speed and memory. Default: float16.
+    :param high_prec_dtype: Set amp autocast mode's higher precision dtype. It will
+        change the target dtype in tensor casting for better precision. Default: float32.
 
     Examples:
 
-    ..code-block::
+    .. code-block::
 
         # used as decorator
         @autocast()
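For reference, a minimal sketch of the two usages the docstring describes (context manager and decorator), assuming the usual megengine top-level import; it is not part of this change:

    import megengine as mge
    import megengine.functional as F
    from megengine.amp import autocast

    x = mge.tensor([[1.0, 2.0]])
    y = mge.tensor([[3.0], [4.0]])

    # used as a context manager
    with autocast(enabled=True, low_prec_dtype="float16", high_prec_dtype="float32"):
        out = F.matmul(x, y)

    # used as a decorator, mirroring the truncated docstring example
    @autocast()
    def add(a, b):
        return a + b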


imperative/python/megengine/core/tensor/amp.py (+3, -3)

@@ -17,7 +17,7 @@ def enabled(mod):
 
     Examples:
 
-    ..code-block::
+    .. code-block::
 
         import megengine as mge
         mge.amp.enabled = True
@@ -40,7 +40,7 @@ def high_prec_dtype(mod):
 
     Examples:
 
-    ..code-block::
+    .. code-block::
 
         import megengine as mge
         mge.amp.high_prec_dtype = "float32"
@@ -63,7 +63,7 @@ def low_prec_dtype(mod):
 
     Examples:
 
-    ..code-block::
+    .. code-block::
 
         import megengine as mge
         mge.amp.low_prec_dtype = "float16"
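The three hunks above document the same module-level switches; combined, the docstring examples read:

    import megengine as mge

    mge.amp.enabled = True               # turn autocast on globally
    mge.amp.high_prec_dtype = "float32"  # dtype kept where precision matters
    mge.amp.low_prec_dtype = "float16"   # dtype used for better speed and memory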


imperative/python/megengine/quantization/qconfig.py (+1, -1)

@@ -28,7 +28,7 @@ class QConfig(
     )
 ):
     r"""
-    A config class indicating how to do quantize toward :class:`~.QATModule`'s
+    A config class indicating how to do quantize toward :class:`~.QATModule` 's
     ``activation`` and ``weight``. See :meth:`~.QATModule.set_qconfig` for detail usage.
 
     :param weight_observer: interface to instantiate an :class:`~.Observer` indicating
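The sentence being adjusted describes how a QConfig is consumed by a QATModule; a rough construction sketch follows, where only weight_observer is confirmed by this hunk and the remaining field names and observer arguments are assumptions that may differ between releases:

    from functools import partial

    from megengine.quantization import QConfig
    from megengine.quantization.observer import MinMaxObserver  # assumed observer class

    # weight_observer is documented above; the other field names are assumed.
    my_qconfig = QConfig(
        weight_observer=partial(MinMaxObserver, dtype="qint8"),
        act_observer=partial(MinMaxObserver, dtype="qint8"),
        weight_fake_quant=None,
        act_fake_quant=None,
    )

    # a QATModule would then receive it via set_qconfig, per the docstring reference
    # qat_module.set_qconfig(my_qconfig)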


imperative/python/megengine/tensor.py (+3, -2)

@@ -30,6 +30,7 @@ class Tensor(_Tensor, ArrayMethodMixin):
     A tensor object represents a multidimensional, homogeneous array of fixed-size items.
 
     :param data: The value of returned Tensor.
+    :type data: Tensor, :class:`~.numpy.ndarray`, :class:`list` or python number.
     :param dtype: The dtype of returned Tensor. Uses data's dtype if not specified.
     :param device: The desired device of returned Tensor. Uses :func:`get_default_device` if not specified.
     :param is_const: Whether make it a ``ImutableTensor`` in tracing mode.
@@ -43,7 +44,7 @@ class Tensor(_Tensor, ArrayMethodMixin):
 
     def __new__(
         cls,
-        data: Union["Tensor", np.ndarray, list, "scalar"] = None,
+        data: Union["Tensor", np.ndarray, list, int, float] = None,
         dtype: np.dtype = None,
         device: str = None,
         is_const: bool = False,
@@ -76,7 +77,7 @@ class Tensor(_Tensor, ArrayMethodMixin):
 
     def __init__(
         self,
-        data: Union["Tensor", np.ndarray, list, "scalar"],
+        data: Union["Tensor", np.ndarray, list, int, float],
         dtype: np.dtype = None,
         device: str = None,
         is_const: bool = False,
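The widened data annotation above maps directly onto ordinary construction; a short sketch of the documented arguments, assuming the usual top-level import:

    import numpy as np
    import megengine as mge

    # data may be a Tensor, ndarray, list, or plain Python number, per the docstring
    a = mge.Tensor([1, 2, 3], dtype="float32")
    b = mge.Tensor(np.ones((2, 2)), device="xpux")  # "xpux" lets MegEngine pick the device
    c = mge.Tensor(2.5)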

