
docs(imperative): fix multiple-target warnings in cross-references

GitOrigin-RevId: c5b4884c8a
tags/v1.7.0.m1
Megvii Engine Team, 3 years ago · parent commit 7ac693a882
9 changed files with 19 additions and 19 deletions
  1. imperative/python/megengine/functional/nn.py (+4, -4)
  2. imperative/python/megengine/module/dropout.py (+1, -1)
  3. imperative/python/megengine/module/qat/concat.py (+1, -1)
  4. imperative/python/megengine/module/qat/conv.py (+3, -3)
  5. imperative/python/megengine/module/qat/conv_bn.py (+2, -2)
  6. imperative/python/megengine/module/qat/elemwise.py (+1, -1)
  7. imperative/python/megengine/module/qat/linear.py (+1, -1)
  8. imperative/python/megengine/quantization/qconfig.py (+1, -1)
  9. imperative/python/megengine/utils/network.py (+5, -5)
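
Every hunk below applies the same pattern: a short Sphinx role such as :class:`~.FakeQuantize` can resolve to more than one importable location of the same class, which makes Sphinx emit a "more than one target found for cross-reference" warning, so each docstring is rewritten to spell out the defining module while the leading ``~`` keeps the rendered link text short. A minimal illustrative sketch of the before/after pattern (the function and docstring below are made up for illustration, not taken from the diff):

```python
def qat_doc_example(inp):
    r"""Illustrative docstring only.

    Ambiguous form -- several importable targets may match ``FakeQuantize``,
    so Sphinx warns about multiple targets:

        Could be applied with :class:`~.FakeQuantize`.

    Qualified form used by this commit -- exactly one target matches, and the
    ``~`` prefix still renders the link text as just ``FakeQuantize``:

        Could be applied with :class:`~.quantization.fake_quant.FakeQuantize`.
    """
    return inp
```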

imperative/python/megengine/functional/nn.py (+4, -4)

@@ -367,7 +367,7 @@ def conv_transpose2d(
) -> Tensor:
r"""2D transposed convolution operation.

-Refer to :class:`~.ConvTranspose2d` for more information.
+Refer to :class:`~.module.conv.ConvTranspose2d` for more information.

Args:
inp: feature map of the convolution operation.
@@ -1519,7 +1519,7 @@ def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
inp: input tensor.
drop_prob: probability to drop (set to zero) a single element.
training: the default behavior of ``dropout`` during training is to rescale the output,
-then it can be replaced by an :class:`~.Identity` during inference. Default: True
+then it can be replaced by an :class:`~.module.identity.Identity` during inference. Default: True
Returns:
the output tensor
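
As a side note on the API quoted in this hunk, here is a small usage sketch of ``dropout`` (illustrative only; it assumes the function is re-exported as ``megengine.functional.dropout`` and uses arbitrary tensor values):

```python
import megengine.functional as F
from megengine import Tensor

x = Tensor([[1.0, 2.0], [3.0, 4.0]])
# During training, kept elements are rescaled by 1 / (1 - drop_prob);
# with training=False the call is effectively an identity mapping.
y_train = F.dropout(x, drop_prob=0.5, training=True)
y_eval = F.dropout(x, drop_prob=0.5, training=False)
```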

@@ -1669,7 +1669,7 @@ def sliding_window(
) -> Tensor:
r"""Extracts sliding local blocks from a batched input tensor.

-Refer to :class:`~.SlidingWindow` for more information.
+Refer to :class:`~.module.sliding_window.SlidingWindow` for more information.

Args:
inp: input tensor.
@@ -1707,7 +1707,7 @@ def sliding_window_transpose(
) -> Tensor:
r"""Sum over the sliding windows on the corresponding input location.

-Refer to :class:`~.SlidingWindowTranspose` for more information.
+Refer to :class:`~.module.sliding_window.SlidingWindowTranspose` for more information.

Args:
inp: input tensor.


imperative/python/megengine/module/dropout.py (+1, -1)

@@ -14,7 +14,7 @@ class Dropout(Module):
r"""Randomly sets input elements to zeros with the probability :math:`drop\_prob` during training.
Commonly used in large networks to prevent overfitting.
Note that we perform dropout only during training, we also rescale(multiply) the output tensor
-by :math:`\frac{1}{1 - drop\_prob}`. During inference :class:`~.Dropout` is equal to :class:`~.Identity`.
+by :math:`\frac{1}{1 - drop\_prob}`. During inference :class:`~.Dropout` is equal to :class:`~.module.identity.Identity`.

Args:
drop_prob: The probability to drop (set to zero) each single element


imperative/python/megengine/module/qat/concat.py (+1, -1)

@@ -14,7 +14,7 @@ from .module import QATModule

class Concat(Float.Concat, QATModule):
r"""A :class:`~.QATModule` to do functional :func:`~.concat` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def forward(self, inps: Iterable[Tensor], axis: int = 0):


imperative/python/megengine/module/qat/conv.py (+3, -3)

@@ -12,7 +12,7 @@ from .module import QATModule

class Conv2d(Float.Conv2d, QATModule):
r"""A :class:`~.QATModule` :class:`~.module.Conv2d` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def calc_conv_qat(self, inp):
@@ -50,7 +50,7 @@ class Conv2d(Float.Conv2d, QATModule):

class ConvRelu2d(Conv2d):
r"""A :class:`~.QATModule` include :class:`~.module.Conv2d` and :func:`~.relu` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def forward(self, inp):
@@ -59,7 +59,7 @@ class ConvRelu2d(Conv2d):

class ConvTranspose2d(Float.ConvTranspose2d, QATModule):
r"""A :class:`~.QATModule` :class:`~.module.ConvTranspose2d` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def calc_conv_transpose2d_qat(self, inp):


imperative/python/megengine/module/qat/conv_bn.py (+2, -2)

@@ -157,7 +157,7 @@ class _ConvBnActivation2d(Float._ConvBnActivation2d, QATModule):

class ConvBn2d(_ConvBnActivation2d):
r"""A fused :class:`~.QATModule` including :class:`~.module.Conv2d` and :class:`~.module.BatchNorm2d` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def forward(self, inp):
@@ -166,7 +166,7 @@ class ConvBn2d(_ConvBnActivation2d):

class ConvBnRelu2d(_ConvBnActivation2d):
r"""A fused :class:`~.QATModule` including :class:`~.module.Conv2d`, :class:`~.module.BatchNorm2d` and :func:`~.relu` with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

def forward(self, inp):


imperative/python/megengine/module/qat/elemwise.py (+1, -1)

@@ -11,7 +11,7 @@ from .module import QATModule

class Elemwise(Float.Elemwise, QATModule):
r"""A :class:`~.QATModule` to do :mod:`~.functional.elemwise` operator with QAT support.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
"""

with_weight = False


imperative/python/megengine/module/qat/linear.py (+1, -1)

@@ -11,7 +11,7 @@ from .module import QATModule

class Linear(Float.Linear, QATModule):
r"""A :class:`~.QATModule` version of :class:`~.module.Linear`.
-Could be applied with :class:`~.Observer` and :class:`~.FakeQuantize`.
+Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.

Args:
in_features: size of each input sample.


imperative/python/megengine/quantization/qconfig.py (+1, -1)

@@ -34,7 +34,7 @@ class QConfig(
weight_observer: interface to instantiate an :class:`~.Observer` indicating
how to collect scales and zero_point of weight.
act_observer: similar to ``weight_observer`` but toward activation.
-weight_fake_quant: interface to instantiate a :class:`~.FakeQuantize` indicating
+weight_fake_quant: interface to instantiate a :class:`~.quantization.fake_quant.FakeQuantize` indicating
how to do fake_quant calculation.
act_fake_quant: similar to ``weight_fake_quant`` but toward activation.
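
The qconfig.py change is docstring-only, but as context for the fields it documents, here is a hedged sketch of assembling a ``QConfig``; ``MinMaxObserver`` and the ``dtype`` strings are assumptions about the quantization API and may differ across MegEngine versions:

```python
from functools import partial

from megengine.quantization.fake_quant import FakeQuantize
from megengine.quantization.observer import MinMaxObserver  # assumed observer implementation
from megengine.quantization.qconfig import QConfig

# Each field is a factory: QAT modules call it to create per-tensor observers
# and fake-quant ops. The dtype strings below are assumptions.
my_qconfig = QConfig(
    weight_observer=partial(MinMaxObserver, dtype="qint8"),
    act_observer=partial(MinMaxObserver, dtype="qint8"),
    weight_fake_quant=partial(FakeQuantize, dtype="qint8"),
    act_fake_quant=partial(FakeQuantize, dtype="qint8"),
)
```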


imperative/python/megengine/utils/network.py (+5, -5)

@@ -532,13 +532,13 @@ def set_symbolic_shape(option: bool):


def as_varnode(obj):
r"""convert a :class:`.VarNode` compatible object to :class:`.VarNode`.
r"""convert a :class:`.utils.network_node.VarNode` compatible object to :class:`.utils.network_node.VarNode`.

Args:
obj: it must be one of the following:

-1. a :class:`.VarNode` object
-2. a :class:`.OpNode` object that has unique output
+1. a :class:`.utils.network_node.VarNode` object
+2. a :class:`.utils.network_node.OpNode` object that has unique output
3. an iterable that produces either type 1 or 2, with length 1

"""
@@ -568,8 +568,8 @@ def as_varnode(obj):


def as_oprnode(obj):
r"""convert a :class:`.OpNode` compatible object to
:class:`.OpNode`; it works like :func:`as_varnode`.i
r"""convert a :class:`.utils.network_node.OpNode` compatible object to
:class:`.utils.network_node.OpNode`; it works like :func:`as_varnode`.
"""
if type(obj) is VarNode:
return obj.owner

