Browse Source

docs(functional): replace elem testcode with doctest format

GitOrigin-RevId: a32a011724
release-1.10
Megvii Engine Team 3 years ago
parent
commit
60163c5d12
15 changed files with 961 additions and 1833 deletions
  1. +326
    -211
      imperative/python/megengine/functional/elemwise.py
  2. +86
    -307
      imperative/python/megengine/functional/math.py
  3. +62
    -178
      imperative/python/megengine/functional/nn.py
  4. +164
    -415
      imperative/python/megengine/functional/tensor.py
  5. +63
    -151
      imperative/python/megengine/functional/vision.py
  6. +47
    -122
      imperative/python/megengine/module/activation.py
  7. +14
    -38
      imperative/python/megengine/module/adaptive_pooling.py
  8. +12
    -23
      imperative/python/megengine/module/batchnorm.py
  9. +18
    -51
      imperative/python/megengine/module/conv.py
  10. +17
    -39
      imperative/python/megengine/module/embedding.py
  11. +6
    -17
      imperative/python/megengine/module/linear.py
  12. +11
    -25
      imperative/python/megengine/module/lrn.py
  13. +7
    -18
      imperative/python/megengine/module/pooling.py
  14. +22
    -32
      imperative/python/megengine/module/sliding_window.py
  15. +106
    -206
      imperative/python/megengine/random/rng.py

+ 326
- 211
imperative/python/megengine/functional/elemwise.py View File

@@ -79,134 +79,371 @@ def _elemwise_multi_type(*args, mode, **kwargs):
# math operations


def add(x, y):
r"""Element-wise `addition`.
def add(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates the sum for each element :math:`x_i` of the input tensor :math:`x` with the respective element :math:`y_i` of the input tensor :math:`y`.

Args:
x: first input tensor. Should have a numeric data type.
y: second input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

Returns:
A tensor containing the element-wise sums. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

.. admonition:: Special cases

For floating-point operands,

* If either :math:`x` or :math:`y` is ``NaN``, the result is ``NaN``.
* If :math:`x` is ``+infinity`` and :math:`y` is ``-infinity``, the result is ``NaN``.
* If :math:`x` is ``-infinity`` and :math:`y` is ``+infinity``, the result is ``NaN``.
* If :math:`x` is ``+infinity`` and :math:`y` is ``+infinity``, the result is ``+infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is ``-infinity``, the result is ``-infinity``.
* If :math:`x` is ``+infinity`` and :math:`y` is a finite number, the result is ``+infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is a finite number, the result is ``-infinity``.
* If :math:`x` is a finite number and :math:`y` is ``+infinity``, the result is ``+infinity``.
* If :math:`x` is a finite number and :math:`y` is ``-infinity``, the result is ``-infinity``.
* If :math:`x` is ``-0`` and :math:`y` is ``-0``, the result is ``-0``.
* If :math:`x` is ``-0`` and :math:`y` is ``+0``, the result is ``+0``.
* If :math:`x` is ``+0`` and :math:`y` is ``-0``, the result is ``+0``.
* If :math:`x` is ``+0`` and :math:`y` is ``+0``, the result is ``+0``.
* If :math:`x` is either ``+0`` or ``-0`` and :math:`y` is a nonzero finite number, the result is :math:`y`.
* If :math:`x` is a nonzero finite number and :math:`y` is either ``+0`` or ``-0``, the result is :math:`x`.
* If :math:`x` is a nonzero finite number and :math:`y` is :math:`-x`, the result is ``+0``.
* In the remaining cases, when neither ``infinity``, ``+0``, ``-0``, nor a ``NaN`` is involved,
and the operands have the same mathematical sign or have different magnitudes,
the sum must be computed and rounded to the nearest representable value according to
IEEE 754-2019 and a supported round mode. If the magnitude is too large to represent,
the operation overflows and the result is an infinity of appropriate mathematical sign.

.. note::

* Floating-point addition is a commutative operation, but not always associative.
* The ``+`` operator can be used as a shorthand for ``add`` on tensors.

Examples:
>>> F.add(1.0, 4.0)
Tensor(5.0, device=xpux:0)
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = Tensor([[1, 1, 1], [2, 2, 2]])
>>> F.add(x, y)
Tensor([[2 3 4]
[6 7 8]], dtype=int32, device=xpux:0)
>>> F.add(x, 1)
Tensor([[2 3 4]
[5 6 7]], dtype=int32, device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.ADD)

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F
def sub(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates the difference for each element :math:`x_i` of the input tensor :math:`x` with the respective element :math:`y_i` of the input tensor :math:`y`.
The result of :math:`x_i - y_i` must be the same as :math:`x_i + (-y_i)` and must be governed by the same floating-point rules as addition.
(See :func:`~.functional.add` ).

x = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
y = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.add(x, y)
print(out.numpy())
Args:
x: first input tensor. Should have a numeric data type.
y: second input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

Returns:
A tensor containing the element-wise differences. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

Outputs:
.. note::

.. testoutput::
The ``-`` operator can be used as a shorthand for ``sub`` on tensors.

[[ 0. 2. 4.]
[ 6. 8. 10.]]
Examples:
>>> F.sub(1.0, 4.0)
Tensor(-3.0, device=xpux:0)
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = Tensor([[1, 1, 1], [2, 2, 2]])
>>> F.sub(x, y)
Tensor([[0 1 2]
[2 3 4]], dtype=int32, device=xpux:0)
>>> F.sub(x, 1)
Tensor([[0 1 2]
[3 4 5]], dtype=int32, device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.ADD)
return _elwise(x, y, mode=Elemwise.Mode.SUB)


def sub(x, y):
r"""Element-wise `sub`.
def mul(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates the product for each element :math:`x_i` of the input tensor :math:`x` with the respective element :math:`y_i` of the input tensor :math:`y`.

Examples:
Args:
x: first input tensor. Should have a numeric data type.
y: second input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

.. testcode::
Returns:
A tensor containing the element-wise products. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

import numpy as np
from megengine import tensor
import megengine.functional as F
.. admonition:: Special cases

x = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
y = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.sub(x, y)
print(out.numpy())
For floating-point operands,

Outputs:
* If either :math:`x_i` or :math:`y_i` is ``NaN``, the result is ``NaN``.
* If :math:`x_i` is either ``+infinity`` or ``-infinity`` and :math:`y_i` is either ``+0`` or ``-0``, the result is ``NaN``.
* If :math:`x_i` is either ``+0`` or ``-0`` and :math:`y_i` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
* If :math:`x_i` and :math:`y_i` have different mathematical signs, the result has a negative mathematical sign, unless the result is ``NaN``.
* If :math:`x_i` is either ``+infinity`` or ``-infinity`` and :math:`y_i` is either ``+infinity`` or ``-infinity``,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* If :math:`x_i` is either ``+infinity`` or ``-infinity`` and :math:`y_i` is a nonzero finite number,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* If :math:`x_i` is a nonzero finite number and :math:`y_i` is either ``+infinity`` or ``-infinity``,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* In the remaining cases, where neither ``infinity`` nor ``NaN`` is involved,
the product must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode.
If the magnitude is too large to represent, the result is an `infinity` of appropriate mathematical sign.
If the magnitude is too small to represent, the result is a zero of appropriate mathematical sign.

.. testoutput::
.. note::

[[1. 1. 1.]
[1. 1. 1.]]
* Floating-point multiplication is not always associative due to finite precision.
* The ``*`` operator can be used as a shorthand for ``mul`` on tensors.

Examples:
>>> F.mul(1.0, 4.0)
Tensor(4.0, device=xpux:0)
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = Tensor([[1, 1, 1], [2, 2, 2]])
>>> F.mul(x, y)
Tensor([[ 1 2 3]
[ 8 10 12]], dtype=int32, device=xpux:0)
>>> F.mul(x, 2)
Tensor([[ 2 4 6]
[ 8 10 12]], dtype=int32, device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.SUB)
return _elwise(x, y, mode=Elemwise.Mode.MUL)


def mul(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates the product for each element :math:`x_i` of the input tensor `x` with the respective element :math:`y_i` of the input tensor `y`.

Note:
* If either :math:`x_i` or :math:`y_i` is `NaN`, the result is `NaN`.
* If :math:`x_i` is either `+infinity` or `-infinity` and :math:`y_i` is either `+0` or `-0`, the result is `NaN`.
* If :math:`x_i` is either `+0` or `-0` and :math:`y_i` is either `+infinity` or `-infinity`, the result is `NaN`.
* If :math:`x_i` and :math:`y_i` have different mathematical signs, the result has a negative mathematical sign, unless the result is `NaN`.
* If :math:`x_i` is either `+infinity` or `-infinity` and :math:`y_i` is either `+infinity` or `-infinity`,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* If :math:`x_i` is either `+infinity` or `-infinity` and :math:`y_i` is a nonzero finite number,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* If :math:`x_i` is a nonzero finite number and :math:`y_i` is either `+infinity` or `-infinity`,
the result is a signed infinity with the mathematical sign determined by the rule already stated above.
* In the remaining cases, where neither `infinity` nor `NaN` is involved,
the product must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode.
If the magnitude is too large to represent, the result is an `infinity` of appropriate mathematical sign.
If the magnitude is too small to represent, the result is a zero of appropriate mathematical sign.
* Floating-point multiplication is not always associative due to finite precision.
def div(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates the division for each element :math:`x_i` of the input tensor :math:`x` with the respective element :math:`y_i` of the input tensor :math:`y`.

Args:
x: first input tensor. Should have a numeric data type.
y: second input tensor. Must be compatible with `x` (see :ref:`broadcasting-rule` ). Should have a numeric data type.
x: dividend input tensor. Should have a numeric data type.
y: divisor input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

Returns:
A tensor containing the element-wise products. The returned array must have a data type determined by :ref:`dtype-promotion`.
A tensor containing the element-wise results. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

.. admonition:: Special cases

For floating-point operands,

* If either :math:`x` or :math:`y` is ``NaN``, the result is ``NaN``.
* If :math:`x` is either ``+infinity`` or ``-infinity`` and :math:`y` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
* If :math:`x` is either ``+0`` or ``-0`` and :math:`y` is either ``+0`` or ``-0``, the result is ``NaN``.
* If :math:`x` is ``+0`` and :math:`y` is greater than ``0``, the result is ``+0``.
* If :math:`x` is ``-0`` and :math:`y` is greater than ``0``, the result is ``-0``.
* If :math:`x` is ``+0`` and :math:`y` is less than ``0``, the result is ``-0``.
* If :math:`x` is ``-0`` and :math:`y` is less than ``0``, the result is ``+0``.
* If :math:`x` is greater than ``0`` and :math:`y` is ``+0``, the result is ``+infinity``.
* If :math:`x` is greater than ``0`` and :math:`y` is ``-0``, the result is ``-infinity``.
* If :math:`x` is less than ``0`` and :math:`y` is ``+0``, the result is ``-infinity``.
* If :math:`x` is less than ``0`` and :math:`y` is ``-0``, the result is ``+infinity``.
* If :math:`x` is ``+infinity`` and :math:`y` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``.
* If :math:`x` is ``+infinity`` and :math:`y` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``.
* If :math:`x` is a positive (i.e., greater than ``0``) finite number and :math:`y` is ``+infinity``, the result is ``+0``.
* If :math:`x` is a positive (i.e., greater than ``0``) finite number and :math:`y` is ``-infinity``, the result is ``-0``.
* If :math:`x` is a negative (i.e., less than ``0``) finite number and :math:`y` is ``+infinity``, the result is ``-0``.
* If :math:`x` is a negative (i.e., less than ``0``) finite number and :math:`y` is ``-infinity``, the result is ``+0``.
* If :math:`x` and :math:`y` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign.
* If :math:`x` and :math:`y` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign.
* In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the operation overflows and the result is an infinity of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign.

.. note::

The ``/`` operator can be used as a shorthand for ``div`` on tensors.

.. seealso::

In Python, ``//`` is the floor division operator and ``/`` is the true division operator.
See :func:`~.functional.floor_div`

Examples:
>>> F.div(1.0, 4.0)
Tensor(0.25, device=xpux:0)
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = Tensor([[1, 1, 1], [2, 2, 2]])
>>> F.div(x, y)
Tensor([[1. 2. 3. ]
[2. 2.5 3. ]], device=xpux:0)
>>> F.div(x, 2)
Tensor([[0.5 1. 1.5]
[2. 2.5 3. ]], device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.TRUE_DIV)

>>> F.mul(2, 3)
Tensor(6, dtype=int32, device=xpux:0)

>>> F.mul(2.0, 3.0)
Tensor(6.0, device=xpux:0)

>>> x = F.arange(6.0).reshape(2, 3)
>>> y = F.arange(3.0)
>>> F.mul(x, y)
Tensor([[ 0. 1. 4.]
[ 0. 4. 10.]], device=xpux:0)
def floor_div(x: Tensor, y: Tensor) -> Tensor:
r"""Rounds the result of dividing each element :math:`x_i` of the input tensor :math:`x`
by the respective element :math:`y_i` of the input tensor :math:`y` to the greatest
(i.e., closest to ``+infinity``) integer-value number that is not greater than the division result.

The `*` operator can be used as a shorthand for :func:`~.functional.mul` on tensors.
Args:
x: dividend input tensor. Should have a numeric data type.
y: divisor input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

>>> x = F.arange(6.0).reshape(2, 3)
>>> y = F.arange(3.0)
>>> x * y
Tensor([[ 0. 1. 4.]
[ 0. 4. 10.]], device=xpux:0)
Returns:
A tensor containing the element-wise results. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

.. admonition:: Special cases

For floating-point operands,

* If either :math:`x` or :math:`y` is ``NaN``, the result is ``NaN``.
* If :math:`x` is either ``+infinity`` or ``-infinity`` and :math:`y` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
* If :math:`x` is either ``+0`` or ``-0`` and :math:`y` is either ``+0`` or ``-0``, the result is ``NaN``.
* If :math:`x` is ``+0`` and :math:`y` is greater than ``0``, the result is ``+0``.
* If :math:`x` is ``-0`` and :math:`y` is greater than ``0``, the result is ``-0``.
* If :math:`x` is ``+0`` and :math:`y` is less than ``0``, the result is ``-0``.
* If :math:`x` is ``-0`` and :math:`y` is less than ``0``, the result is ``+0``.
* If :math:`x` is greater than ``0`` and :math:`y` is ``+0``, the result is ``+infinity``.
* If :math:`x` is greater than ``0`` and :math:`y` is ``-0``, the result is ``-infinity``.
* If :math:`x` is less than ``0`` and :math:`y` is ``+0``, the result is ``-infinity``.
* If :math:`x` is less than ``0`` and :math:`y` is ``-0``, the result is ``+infinity``.
* If :math:`x` is ``+infinity`` and :math:`y` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``.
* If :math:`x` is ``+infinity`` and :math:`y` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``.
* If :math:`x` is ``-infinity`` and :math:`y` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``.
* If :math:`x` is a positive (i.e., greater than ``0``) finite number and :math:`y` is ``+infinity``, the result is ``+0``.
* If :math:`x` is a positive (i.e., greater than ``0``) finite number and :math:`y` is ``-infinity``, the result is ``-0``.
* If :math:`x` is a negative (i.e., less than ``0``) finite number and :math:`y` is ``+infinity``, the result is ``-0``.
* If :math:`x` is a negative (i.e., less than ``0``) finite number and :math:`y` is ``-infinity``, the result is ``+0``.
* If :math:`x` and :math:`y` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign.
* If :math:`x` and :math:`y` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign.
* In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the operation overflows and the result is an infinity of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign.

.. note::

The ``//`` operator can be used as a shorthand for ``floor_div`` on tensors.

.. seealso::

In Python, ``//`` is the floor division operator and ``/`` is the true division operator.
See :func:`~.functional.div`

Examples:
>>> F.floor_div(5.0, 4.0)
Tensor(1.0, device=xpux:0)
>>> x = Tensor([[1, 2, 3], [4, 5, 6]])
>>> y = Tensor([[1, 1, 1], [2, 2, 2]])
>>> F.floor_div(x, y)
Tensor([[1 2 3]
[2 2 3]], dtype=int32, device=xpux:0)
>>> F.floor_div(x, 2)
Tensor([[0 1 1]
[2 2 3]], dtype=int32, device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.MUL)
return _elwise(x, y, mode=Elemwise.Mode.FLOOR_DIV)


def div(x, y):
r"""Element-wise `(x / y)`."""
return _elwise(x, y, mode=Elemwise.Mode.TRUE_DIV)
def neg(x: Tensor) -> Tensor:
r"""Computes the numerical negative of each element :math:`x_i` (i.e., :math:`y_i = -x_i` ) of the input tensor :math:`x`.

Args:
x: input tensor. Should have a numeric data type.

def floor_div(x, y):
r"""Element-wise `floor(x / y)`."""
return _elwise(x, y, mode=Elemwise.Mode.FLOOR_DIV)
Returns:
A tensor containing the evaluated result for each element in :math:`x`.
The returned tensor must have a data type determined by :ref:`dtype-promotion`.

.. note::

The unary ``-`` operator can be used as a shorthand for ``neg`` on tensors.

def neg(x):
r"""Element-wise `negation`."""
Examples:
>>> x = Tensor([1, -1])
>>> F.neg(x)
Tensor([-1 1], dtype=int32, device=xpux:0)
"""
return _elwise(x, mode=Elemwise.Mode.NEGATE)


def pow(x, y):
r"""Element-wise `power`."""
def pow(x: Tensor, y: Tensor) -> Tensor:
r"""Calculates an implementation-dependent approximation of exponentiation by
raising each element :math:`x_i` (the base) of the input tensor :math:`x` to
the power of :math:`y_i` (the exponent), where :math:`y_i` is the corresponding element of the input tensor :math:`y`.

Args:
x: first input tensor whose elements correspond to the exponentiation base. Should have a numeric data type.
y: second input tensor whose elements correspond to the exponentiation exponent. Must be compatible with `x` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

Returns:
A tensor containing the element-wise results. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

.. note::

The ``**`` operator can be used as a shorthand for ``pow`` on tensors.

.. admonition:: Special cases

For floating-point operands,

* If :math:`x_i` is not equal to ``1`` and :math:`y_i` is ``NaN``, the result is ``NaN``.
* If :math:`y_i` is ``+0``, the result is ``1``, even if ``x_i`` is ``NaN``.
* If :math:`y_i` is ``-0``, the result is ``1``, even if ``x_i`` is ``NaN``.
* If :math:`x_i` is ``NaN`` and ``y_i`` is not equal to ``0``, the result is ``NaN``.
* If ``abs(x_i)`` is greater than ``1`` and ``y_i`` is ``+infinity``, the result is ``+infinity``.
* If ``abs(x_i)`` is greater than ``1`` and ``y_i`` is ``-infinity``, the result is ``+0``.
* If ``abs(x_i)`` is ``1`` and ``y_i`` is ``+infinity``, the result is ``1``.
* If ``abs(x_i)`` is ``1`` and ``y_i`` is ``-infinity``, the result is ``1``.
* If ``x_i`` is ``1`` and ``y_i`` is not ``NaN``, the result is ``1``.
* If ``abs(x_i)`` is less than ``1`` and ``y_i`` is ``+infinity``, the result is ``+0``.
* If ``abs(x_i)`` is less than ``1`` and ``y_i`` is ``-infinity``, the result is ``+infinity``.
* If ``x_i`` is ``+infinity`` and ``y_i`` is greater than 0, the result is ``+infinity``.
* If ``x_i`` is ``+infinity`` and ``y_i`` is less than 0, the result is ``+0``.
* If ``x_i`` is ``-infinity``, ``y_i`` is greater than 0, and ``y_i`` is an odd integer value, the result is ``-infinity``.
* If ``x_i`` is ``-infinity``, ``y_i`` is greater than 0, and ``y_i`` is not an odd integer value, the result is ``+infinity``.
* If ``x_i`` is ``-infinity``, ``y_i`` is less than 0, and ``y_i`` is an odd integer value, the result is ``-0``.
* If ``x_i`` is ``-infinity``, ``y_i`` is less than 0, and ``y_i`` is not an odd integer value, the result is ``+0``.
* If ``x_i`` is ``+0`` and ``y_i`` is greater than 0, the result is ``+0``.
* If ``x_i`` is ``+0`` and ``y_i`` is less than 0, the result is ``+infinity``.
* If ``x_i`` is ``-0``, ``y_i`` is greater than 0, and ``y_i`` is an odd integer value, the result is ``-0``.
* If ``x_i`` is ``-0``, ``y_i`` is greater than 0, and ``y_i`` is not an odd integer value, the result is ``+0``.
* If ``x_i`` is ``-0``, ``y_i`` is less than 0, and ``y_i`` is an odd integer value, the result is ``-infinity``.
* If ``x_i`` is ``-0``, ``y_i`` is less than 0, and ``y_i`` is not an odd integer value, the result is ``+infinity``.
* If ``x_i`` is less than 0, ``x_i`` is a finite number, ``y_i`` is a finite number, and ``y_i`` is not an integer value, the result is ``NaN``.

Examples:
>>> F.pow(2.0, 3.0)
Tensor(8.0, device=xpux:0)
>>> x = Tensor([1, 2, 3, 4, 5])
>>> y = Tensor([1, 2, 1, 2, 1])
>>> F.pow(x, y)
Tensor([ 1. 4. 3. 16. 5.], device=xpux:0)
>>> F.pow(x, 2)
Tensor([ 1. 4. 9. 16. 25.], device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.POW)


def mod(x, y):
r"""Element-wise `remainder of division`."""
def mod(x: Tensor, y: Tensor) -> Tensor:
r"""Returns the remainder of division for each element ``x_i`` of the input tensor ``x``
and the respective element ``y_i`` of the input tensor ``y``.

.. note:: ``mod`` is an alias of ``remainder`` in NumPy.

.. seealso:: :func:`~.div` / :func:`~.floor_div`

Args:
x: dividend input tensor. Should have a numeric data type.
y: divisor input tensor. Must be compatible with ``x`` (see :ref:`broadcasting-rule` ). Should have a numeric data type.

Returns:
A tensor containing the element-wise results. The returned tensor must have a data type determined by :ref:`dtype-promotion`.

Examples:
>>> F.mod(8, 3)
Tensor(2, dtype=int32, device=xpux:0)
>>> x = Tensor([1, 2, 3, 4, 5])
>>> y = Tensor([1, 2, 1, 2, 1])
>>> F.mod(x, y)
Tensor([0 0 0 0 0], dtype=int32, device=xpux:0)
>>> F.mod(x, 3)
Tensor([1 2 0 1 2], dtype=int32, device=xpux:0)
"""
return _elwise(x, y, mode=Elemwise.Mode.MOD)


@@ -236,52 +473,12 @@ def log1p(x):


def sqrt(x: Tensor) -> Tensor:
r"""Element-wise `sqrt`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.sqrt(x)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

[[0. 1. 1.4142]
[1.7321 2. 2.2361]]
"""
r"""Element-wise `sqrt`."""
return x ** 0.5


def square(x: Tensor) -> Tensor:
r"""Element-wise `square`.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.functional as F

data = mge.tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.square(data)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

[[ 0. 1. 4.]
[ 9. 16. 25.]]
"""
r"""Element-wise `square`."""
return x ** 2


@@ -314,27 +511,7 @@ def minimum(x, y):


def cos(x):
r"""Element-wise `cosine`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.cos(x)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

[[ 1. 0.5403 -0.4161]
[-0.99 -0.6536 0.2837]]
"""
r"""Element-wise `cosine`."""
return _elwise(x, mode=Elemwise.Mode.COS)


@@ -407,28 +584,7 @@ def atanh(x):


def left_shift(x, y):
r"""Element-wise `bitwise binary: x << y`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(0, 6, dtype=np.int32).reshape(2, 3))
out = F.left_shift(x, 2)
print(out.numpy())

Outputs:

.. testoutput::

[[ 0 4 8]
[12 16 20]]

"""
r"""Element-wise `bitwise binary: x << y`."""
return _elwise(x, y, mode=Elemwise.Mode.SHL)


@@ -461,8 +617,7 @@ def logical_xor(x, y):


def logaddexp(x: Tensor, y: Tensor) -> Tensor:
r"""Element-wise `numerically stable log(exp(x) + exp(y))`
"""
r"""Element-wise `numerically stable log(exp(x) + exp(y))`."""
return _elwise(x, y, mode=Elemwise.Mode.LOG_SUM_EXP)


@@ -470,28 +625,7 @@ def logaddexp(x: Tensor, y: Tensor) -> Tensor:


def equal(x, y):
r"""Element-wise `(x == y)`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
y = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.equal(x, y)
print(out.numpy())

Outputs:

.. testoutput::

[[1. 1. 1.]
[1. 1. 1.]]
"""
r"""Element-wise `(x == y)`."""
return _elwise(x, y, mode=Elemwise.Mode.EQ)


@@ -539,7 +673,7 @@ def clip(x: Tensor, lower=None, upper=None) -> Tensor:
x: (Tensor): The input tensor.
lower: (Numeric, optional): lower-bound of the range to be clamped to.
upper: (Numeric, optional): upper-bound of the range to be clamped to.
Note:
* If both `lower` and `upper` are None, raises an AssertionError.
* If `lower` is bigger than `upper`, the result is same as `clip(Tensor(), upper, upper)`.
@@ -547,25 +681,6 @@ def clip(x: Tensor, lower=None, upper=None) -> Tensor:
Returns:
output clamped tensor. The result must have a data type determined by :ref:`dtype-promotion`.

Examples:

>>> import numpy as np
>>> x = Tensor([0,1,2,3,4])
>>> F.clip(x, 2, 4)
Tensor([2 2 2 3 4], dtype=int32, device=xpux:0)

>>> x = Tensor([0,1,2,3,4])
>>> F.clip(x, 4, 3)
Tensor([3 3 3 3 3], dtype=int32, device=xpux:0)

>>> x = F.arange(5)
>>> F.clip(x, lower=3)
Tensor([3. 3. 3. 3. 4.], device=xpux:0)

>>> x = F.arange(5, dtype=np.int32)
>>> F.clip(x, upper=2.1)
Tensor([0. 1. 2. 2.1 2.1], device=xpux:0)
"""
assert (
lower is not None or upper is not None


+ 86
- 307
imperative/python/megengine/functional/math.py View File

@@ -55,20 +55,9 @@ def isnan(inp: Tensor) -> Tensor:
result tensor.

Examples:

.. testcode::

from megengine import tensor
import megengine.functional as F

x = tensor([1, float("nan"), 0])
print(F.isnan(x).numpy())

Outputs:

.. testoutput::

[False True False]
>>> x = Tensor([1, float("nan"), 0])
>>> F.isnan(x).numpy()
array([False, True, False])
"""
return inp != inp

@@ -83,20 +72,9 @@ def isinf(inp: Tensor) -> Tensor:
result tensor.

Examples:

.. testcode::

from megengine import tensor
import megengine.functional as F

x = tensor([1, float("inf"), 0])
print(F.isinf(x).numpy())

Outputs:

.. testoutput::

[False True False]
>>> x = Tensor([1, float("inf"), 0])
>>> F.isinf(x).numpy()
array([False, True, False])
"""
return abs(inp).astype("float32") == float("inf")

@@ -111,20 +89,9 @@ def sign(inp: Tensor):
the sign of input tensor.

Examples:

.. testcode::

from megengine import tensor
import megengine.functional as F

x = tensor([1, -1, 0])
print(F.sign(x).numpy())

Outputs:

.. testoutput::

[ 1 -1 0]
>>> x = Tensor([1, -1, 0])
>>> F.sign(x)
Tensor([ 1 -1 0], dtype=int32, device=xpux:0)
"""
return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)

@@ -148,22 +115,10 @@ def sum(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.sum(x)
print(out.numpy())

Outputs:

.. testoutput::

21
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
>>> F.sum(x)
Tensor(21, dtype=int32, device=xpux:0)
"""
return inp.sum(axis=axis, keepdims=keepdims)

@@ -183,22 +138,10 @@ def prod(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.prod(x)
print(out.numpy())

Outputs:

.. testoutput::

720
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
>>> F.prod(x)
Tensor(720, dtype=int32, device=xpux:0)
"""
return inp.prod(axis=axis, keepdims=keepdims)

@@ -221,22 +164,11 @@ def mean(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.mean(x)
print(out.numpy())

Outputs:

.. testoutput::

3.5
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
>>> out = F.mean(x)
>>> out.numpy()
array(3.5, dtype=float32)
"""
return inp.mean(axis=axis, keepdims=keepdims)

@@ -259,22 +191,11 @@ def var(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
out = F.var(data)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

2.9167
>>> import numpy as np
>>> data = Tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
>>> out = F.var(data)
>>> out.numpy().round(decimals=4)
2.9167
"""
if axis is None:
m = mean(inp, axis=axis, keepdims=False)
@@ -302,22 +223,11 @@ def std(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
out = F.std(data, axis=1)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

[0.8165 0.8165]
>>> import numpy as np
>>> data = Tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
>>> out = F.std(data, axis=1)
>>> out.numpy().round(decimals=4)
array([0.8165, 0.8165], dtype=float32)
"""
return var(inp, axis=axis, keepdims=keepdims) ** 0.5

@@ -340,22 +250,10 @@ def min(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.min(x)
print(out.numpy())

Outputs:

.. testoutput::

1
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
>>> F.min(x)
Tensor(1, dtype=int32, device=xpux:0)
"""
return inp.min(axis=axis, keepdims=keepdims)

@@ -378,22 +276,10 @@ def max(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.max(x)
print(out.numpy())

Outputs:

.. testoutput::

6
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
>>> F.max(x)
Tensor(6, dtype=int32, device=xpux:0)
"""
return inp.max(axis=axis, keepdims=keepdims)

@@ -414,22 +300,11 @@ def norm(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-3, 3, dtype=np.float32))
out = F.norm(x)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

4.3589
>>> import numpy as np
>>> x = Tensor(np.arange(-3, 3, dtype=np.float32))
>>> out = F.norm(x)
>>> out.numpy().round(decimals=4)
4.3589
"""
if axis is None:
if inp.ndim != 1:
@@ -463,22 +338,10 @@ def argmin(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.argmin(x)
print(out.numpy())

Outputs:

.. testoutput::

0
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
>>> F.argmin(x)
Tensor(0, dtype=int32, device=xpux:0)
"""
if axis is None:
assert not keepdims, "can not set axis=None and keepdims=True"
@@ -522,22 +385,10 @@ def argmax(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.argmax(x)
print(out.numpy())

Outputs:

.. testoutput::

5
>>> import numpy as np
>>> x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
>>> F.argmax(x)
Tensor(5, dtype=int32, device=xpux:0)
"""
if axis is None:
assert not keepdims, "can not set axis=None and keepdims=True"
@@ -603,22 +454,10 @@ def argsort(inp: Tensor, descending: bool = False) -> Tensor:
indices of int32 indicates how to sort the input.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.array([1,2], dtype=np.float32))
indices = F.argsort(x)
print(indices.numpy())

Outputs:

.. testoutput::

[0 1]
>>> import numpy as np
>>> x = Tensor(np.array([1,2], dtype=np.float32))
>>> F.argsort(x)
Tensor([0 1], dtype=int32, device=xpux:0)
"""
assert len(inp.shape) <= 2, "Input should be 1d or 2d"
if descending:
@@ -646,22 +485,11 @@ def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
tuple of two tensors `(sorted_tensor, indices_of_int32)`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.array([1,2], dtype=np.float32))
out, indices = F.sort(x)
print(out.numpy())

Outputs:

.. testoutput::

[1. 2.]
>>> import numpy as np
>>> x = Tensor(np.array([1,2], dtype=np.float32))
>>> out, indices = F.sort(x)
>>> out.numpy()
array([1., 2.], dtype=float32)
"""
assert len(inp.shape) <= 2, "Input should be 1d or 2d"
if descending:
@@ -699,22 +527,11 @@ def topk(
tuple of two tensors ``(topk_tensor, indices_of_int32)``

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
top, indices = F.topk(x, 5, descending=False)
print(top.numpy(), indices.numpy())

Outputs:

.. testoutput::

[1. 2. 3. 4. 5.] [7 0 6 1 5]
>>> import numpy as np
>>> x = Tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
>>> top, indices = F.topk(x, 5, descending=False)
>>> print(top.numpy(), indices.numpy())
[1. 2. 3. 4. 5.] [7 0 6 1 5]
"""
if descending:
k = -k
@@ -764,23 +581,12 @@ def matinv(inp: Tensor) -> Tensor:


Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data = tensor([[1.0, 0.0], [1.0, 1.0]])
out = F.matinv(data)
print(out.numpy())

Outputs:

.. testoutput::

[[ 1. 0.]
[-1. 1.]]
>>> import numpy as np
>>> data = Tensor([[1.0, 0.0], [1.0, 1.0]])
>>> out = F.matinv(data)
>>> out.numpy()
array([[ 1., 0.],
[-1., 1.]], dtype=float32)
"""

(result,) = apply(builtin.MatrixInverse(), inp)
@@ -818,24 +624,13 @@ def matmul(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
out = F.matmul(data1, data2)
print(out.numpy())

Outputs:

.. testoutput::

[[10. 13.]
[28. 40.]]
>>> import numpy as np
>>> data1 = Tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
>>> data2 = Tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
>>> out = F.matmul(data1, data2)
>>> out.numpy()
array([[10., 13.],
[28., 40.]], dtype=float32)
"""
return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode, format)

@@ -853,23 +648,12 @@ def dot(inp1: Tensor, inp2: Tensor) -> Tensor:
output value.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data1 = tensor(np.arange(0, 6, dtype=np.float32))
data2 = tensor(np.arange(0, 6, dtype=np.float32))
out = F.dot(data1, data2)
print(out.numpy())

Outputs:

.. testoutput::

55.
>>> import numpy as np
>>> data1 = Tensor(np.arange(0, 6, dtype=np.float32))
>>> data2 = Tensor(np.arange(0, 6, dtype=np.float32))
>>> out = F.dot(data1, data2)
>>> out.numpy()
array(55., dtype=float32)
"""
op = builtin.Dot()
assert (
@@ -895,17 +679,12 @@ def svd(inp: Tensor, full_matrices=False, compute_uv=True) -> Tensor:
``U`` contains matrices orthonormal columns (i.e., the columns are left singular vectors). If ``full_matrices`` is ``True`` , the array must have shape ``(..., M, M)`` . If ``full_matrices`` is ``False`` , the array must have shape ``(..., M, K)`` , where ``K = min(M, N)`` .

Examples:

>>> import numpy as np
>>> x = Tensor(np.random.randn(9, 6))
>>> y = Tensor(np.random.randn(2, 7, 8, 3))

Reconstruction based on reduced SVD, 2D case:
>>> U, S, Vh = F.svd(x, full_matrices=False)
>>> print(U._tuple_shape, S._tuple_shape, Vh._tuple_shape)
(9, 6) (6,) (6, 6)

Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = F.svd(y, full_matrices=False)
>>> print(u._tuple_shape, s._tuple_shape, vh._tuple_shape)
(2, 7, 8, 3) (2, 7, 3) (2, 7, 3, 3)


+ 62
- 178
imperative/python/megengine/functional/nn.py View File

@@ -767,21 +767,11 @@ def hswish(x):
r"""Element-wise `x * relu6(x + 3) / 6`.

Example:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(5).astype(np.float32))
out = F.hswish(x)
print(out.numpy().round(decimals=4))

.. testoutput::

[0. 0.6667 1.6667 3. 4. ]

>>> import numpy as np
>>> x = Tensor(np.arange(5).astype(np.float32))
>>> out = F.hswish(x)
>>> out.numpy().round(decimals=4)
array([0. , 0.6667, 1.6667, 3. , 4. ], dtype=float32)
"""
return _elwise(x, mode=Elemwise.Mode.H_SWISH)

@@ -992,22 +982,11 @@ def softplus(inp: Tensor) -> Tensor:
= \log1p(\exp(-\text{abs}(x))) + \text{relu}(x)

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-3, 3, dtype=np.float32))
y = F.softplus(x)
print(y.numpy().round(decimals=4))

Outputs:

.. testoutput::

[0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]
>>> import numpy as np
>>> x = Tensor(np.arange(-3, 3, dtype=np.float32))
>>> y = F.softplus(x)
>>> y.numpy().round(decimals=4)
array([0.0486, 0.1269, 0.3133, 0.6931, 1.3133, 2.1269], dtype=float32)
"""
softplus = _get_softplus_op(inp.dtype, inp.device)
(oup,) = softplus(inp)
@@ -1030,23 +1009,12 @@ def logsoftmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
= x - \text{logsumexp}(x)

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
y = F.logsoftmax(x, axis=1)
print(y.numpy().round(decimals=4))

Outputs:

.. testoutput::

[[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]
>>> import numpy as np
>>> x = Tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
>>> y = F.logsoftmax(x, axis=1)
>>> y.numpy().round(decimals=4)
array([[-4.4519, -3.4519, -2.4519, -1.4519, -0.4519],
[-4.4519, -3.4519, -2.4519, -1.4519, -0.4519]], dtype=float32)
"""
return inp - logsumexp(inp, axis, keepdims=True)

@@ -1094,23 +1062,12 @@ def logsigmoid(inp: Tensor) -> Tensor:
= - \text{softplus}(-x)

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-5, 5, dtype=np.float32))
y = F.logsigmoid(x)
print(y.numpy().round(decimals=4))

Outputs:

.. testoutput::

[-5.0067 -4.0182 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
-0.0181]
>>> import numpy as np
>>> x = Tensor(np.arange(-5, 5, dtype=np.float32))
>>> y = F.logsigmoid(x)
>>> y.numpy().round(decimals=4)
array([-5.0067, -4.0182, -3.0486, -2.1269, -1.3133, -0.6931, -0.3133,
-0.1269, -0.0486, -0.0181], dtype=float32)
"""
logsigmoid = _get_logsigmoid_op(inp.dtype, inp.device)
(oup,) = logsigmoid(inp)
@@ -1139,22 +1096,11 @@ def logsumexp(
b = \max(x_j)

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
y = F.logsumexp(x, axis=1, keepdims=False)
print(y.numpy().round(decimals=4))

Outputs:

.. testoutput::

[-0.5481 4.4519]
>>> import numpy as np
>>> x = Tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
>>> y = F.logsumexp(x, axis=1, keepdims=False)
>>> y.numpy().round(decimals=4)
array([-0.5481, 4.4519], dtype=float32)
"""
max_value = max(inp.detach(), axis, keepdims=True)
if keepdims:
@@ -1183,23 +1129,12 @@ def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
See :class:`~.module.Softmax` for more details.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
out = F.softmax(x)
print(out.numpy().round(decimals=4))

Outputs:

.. testoutput::

[[0.0117 0.0317 0.0861 0.2341 0.6364]
[0.0117 0.0317 0.0861 0.2341 0.6364]]
>>> import numpy as np
>>> x = Tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
>>> out = F.softmax(x)
>>> out.numpy().round(decimals=4)
array([[0.0117, 0.0317, 0.0861, 0.2341, 0.6364],
[0.0117, 0.0317, 0.0861, 0.2341, 0.6364]], dtype=float32)
"""
if axis is None:
axis = _get_softmax_axis(len(inp.shape))
@@ -1622,28 +1557,14 @@ def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
the output tensor

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

# test training mode
data = tensor(np.ones(10000000, dtype=np.float32))
out = F.nn.dropout(data, 1.0 / 3.0, training=True)
assert not out.numpy().all()

# test eval mode
out = F.nn.dropout(data, 1.0 / 3.0, training=False)
assert out.numpy().all()

Outputs:

.. testoutput::
:options: +SKIP

[1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
>>> import numpy as np
>>> data = Tensor(np.ones(10000000, dtype=np.float32))
>>> out = F.nn.dropout(data, 1.0 / 3.0, training=True)
>>> assert not out.numpy().all()
>>> out = F.nn.dropout(data, 1.0 / 3.0, training=False)
>>> assert out.numpy().all()
>>> out.numpy()
array([1., 1., 1., ..., 1., 1., 1.], dtype=float32)
"""
assert 0 <= drop_prob < 1
if not training or drop_prob == 0:
@@ -1663,24 +1584,12 @@ def one_hot(inp: Tensor, num_classes: int) -> Tensor:
num_classes: number of classes denotes the last dimension of the output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(x, num_classes=4)
print(out.numpy())

Outputs:

.. testoutput::

[[0 1 0 0]
[0 0 1 0]
[0 0 0 1]]
>>> import numpy as np
>>> x = Tensor(np.arange(1, 4, dtype=np.int32))
>>> F.one_hot(x, num_classes=4)
Tensor([[0 1 0 0]
[0 0 1 0]
[0 0 0 1]], dtype=int32, device=xpux:0)
"""
zeros_tensor = zeros(
list(inp.shape) + [num_classes], dtype=inp.dtype, device=inp.device
@@ -1731,22 +1640,11 @@ def indexing_one_hot(
keepdims: whether not to remove the axis in result. Default: False

Examples:

.. testcode::

import megengine.functional as F
from megengine import tensor

src = tensor([[1.0, 2.0]])
index = tensor([0])
val = F.indexing_one_hot(src, index)
print(val.numpy())

Outputs:

.. testoutput::

[1.]
>>> src = Tensor([[1.0, 2.0]])
>>> index = Tensor([0])
>>> val = F.indexing_one_hot(src, index)
>>> val.numpy()
array([1.], dtype=float32)
"""
assert isinstance(src, Tensor), "src must be of Tensor type"
op = builtin.IndexingOneHot(axis=axis)
@@ -1866,7 +1764,6 @@ def pad(
constant_val: Fill value for ``'constant'`` padding. Default: 0

Examples:

>>> import numpy as np
>>> inp = Tensor([[1., 2., 3.],[4., 5., 6.]])
>>> inp
@@ -1944,30 +1841,17 @@ def local_response_norm(
beta: hyperparameter beta. The default value is 0.75.

Example:

.. testcode::

from megengine import tensor
import megengine.functional as f
import numpy as np

inp = tensor(np.arange(25, dtype=np.float32).reshape(1,1,5,5))
GT = np.array([[[[ 0., 0.999925, 1.9994003, 2.9979765, 3.9952066],
[ 4.9906454, 5.983851, 6.974385, 7.961814, 8.945709 ],
[ 9.925651, 10.90122, 11.872011, 12.837625, 13.7976675],
[14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
[19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])

out = f.local_response_norm(inp, kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
print('pass')

Outputs:

.. testoutput::

>>> import numpy as np
>>> inp = Tensor(np.arange(25, dtype=np.float32).reshape(1,1,5,5))
>>> GT = np.array([[[[ 0., 0.999925, 1.9994003, 2.9979765, 3.9952066],
... [ 4.9906454, 5.983851, 6.974385, 7.961814, 8.945709 ],
... [ 9.925651, 10.90122, 11.872011, 12.837625, 13.7976675],
... [14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
... [19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])
>>> out = F.local_response_norm(inp, kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
>>> np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
>>> print('pass')
pass

"""
op = builtin.LRN(n=kernel_size, k=k, alpha=alpha, beta=beta,)
(output,) = apply(op, inp)


+ 164
- 415
imperative/python/megengine/functional/tensor.py View File

@@ -101,23 +101,13 @@ def eye(N, M=None, *, dtype="float32", device: Optional[CompNode] = None) -> Ten
eye matrix.

Examples:

.. testcode::

import numpy as np
import megengine.functional as F

out = F.eye(4, 6, dtype=np.float32)
print(out.numpy())

Outputs:

.. testoutput::

[[1. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]]
>>> import numpy as np
>>> out = F.eye(4, 6, dtype=np.float32)
>>> out.numpy()
array([[1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0., 0.]], dtype=float32)
"""
if M is not None:
if isinstance(N, Tensor) or isinstance(M, Tensor):
@@ -156,21 +146,11 @@ def full(
a tensor where every element is equal to ``value``.

Examples:

.. testcode::

import numpy as np
import megengine.functional as F

out = F.full([2,3], 1.5)
print(out.numpy())

Outputs:

.. testoutput::

[[1.5 1.5 1.5]
[1.5 1.5 1.5]]
>>> import numpy as np
>>> out = F.full([2,3], 1.5)
>>> out.numpy()
array([[1.5, 1.5, 1.5],
[1.5, 1.5, 1.5]], dtype=float32)
"""

if isinstance(shape, int):
@@ -202,30 +182,16 @@ def ones(
a tensor containing ones.

Examples:

.. testcode::

import megengine.functional as F

out = F.ones(5)
print(out.numpy())
out = F.ones((5, ), dtype='int32')
print(out.numpy())
out = F.ones((2, 2))
print(out.numpy())
out = F.ones([2, 1])
print(out.numpy())

Outputs:

.. testoutput::

[1. 1. 1. 1. 1.]
[1 1 1 1 1]
[[1. 1.]
[1. 1.]]
[[1.]
[1.]]
>>> F.ones(5)
Tensor([1. 1. 1. 1. 1.], device=xpux:0)
>>> F.ones((5, ), dtype='int32')
Tensor([1 1 1 1 1], dtype=int32, device=xpux:0)
>>> F.ones((2, 2))
Tensor([[1. 1.]
[1. 1.]], device=xpux:0)
>>> F.ones([2, 1])
Tensor([[1.]
[1.]], device=xpux:0)
"""
return full(shape, 1.0, dtype=dtype, device=device)

@@ -306,24 +272,11 @@ def full_like(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

inp = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.full_like(inp, 2)
print(out.numpy())

Outputs:

.. testoutput::

[[2 2 2]
[2 2 2]]

>>> import numpy as np
>>> inp = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
>>> F.full_like(inp, 2)
Tensor([[2 2 2]
[2 2 2]], dtype=int32, device=xpux:0)
"""
x = Const(value, inp.dtype, inp.device, inp)
if inp.ndim == 0:
@@ -342,23 +295,12 @@ def broadcast_to(inp: Tensor, shape: Union[int, Iterable[int]]) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data = tensor(np.arange(0, 3, dtype=np.float32).reshape(3))
out = F.broadcast_to(data, (2, 3))
print(out.numpy())

Outputs:

.. testoutput::

[[0. 1. 2.]
[0. 1. 2.]]
>>> import numpy as np
>>> data = Tensor(np.arange(0, 3, dtype=np.float32).reshape(3))
>>> out = F.broadcast_to(data, (2, 3))
>>> out.numpy()
array([[0., 1., 2.],
[0., 1., 2.]], dtype=float32)
"""
return broadcast_cpp(inp, shape)

@@ -375,26 +317,15 @@ def concat(inps: Iterable[Tensor], axis: int = 0, device=None) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
data2 = tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
out = F.concat([data1, data2])
print(out.numpy())

Outputs:

.. testoutput::

[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]]
>>> import numpy as np
>>> data1 = Tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
>>> data2 = Tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
>>> out = F.concat([data1, data2])
>>> out.numpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]], dtype=float32)
"""
if len(inps) == 1:
return inps[0]
@@ -419,24 +350,13 @@ def stack(inps, axis=0, device=None):
output concatenated tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x1 = tensor(np.arange(0, 3, dtype=np.float32).reshape((3)))
x2 = tensor(np.arange(6, 9, dtype=np.float32).reshape((3)))
out = F.stack([x1, x2], axis=0)
print(out.numpy())

Outputs:

.. testoutput::

[[0. 1. 2.]
[6. 7. 8.]]
>>> import numpy as np
>>> x1 = Tensor(np.arange(0, 3, dtype=np.float32).reshape((3)))
>>> x2 = Tensor(np.arange(6, 9, dtype=np.float32).reshape((3)))
>>> out = F.stack([x1, x2], axis=0)
>>> out.numpy()
array([[0., 1., 2.],
[6., 7., 8.]], dtype=float32)
"""
if len(inps) > 0 and not isinstance(inps[0].shape, inps[0].__class__):
shapes = {arr.shape for arr in inps}
@@ -460,27 +380,15 @@ def split(inp, nsplits_or_sections, axis=0):
output tensor list.

Examples:

.. testcode::

import os
import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.random.random((10, 20)), dtype=np.float32)
y = F.split(x, 3)
z = F.split(x, [6, 17], axis=1)

print([i.numpy().shape for i in y])
print([i.numpy().shape for i in z])

Outputs:

.. testoutput::

[(4, 20), (3, 20), (3, 20)]
[(10, 6), (10, 11), (10, 3)]
>>> import os
>>> import numpy as np
>>> x = Tensor(np.random.random((10, 20)), dtype=np.float32)
>>> y = F.split(x, 3)
>>> z = F.split(x, [6, 17], axis=1)
>>> print([i.numpy().shape for i in y])
[(4, 20), (3, 20), (3, 20)]
>>> print([i.numpy().shape for i in z])
[(10, 6), (10, 11), (10, 3)]
"""

return split_cpp(inp, nsplits_or_sections, axis)
@@ -535,25 +443,13 @@ def gather(inp: Tensor, axis: int, index: Tensor) -> Tensor:
output tensor.

Examples:

.. testcode::

import megengine.functional as F
from megengine import tensor

inp = tensor([
[1,2], [3,4], [5,6],
])
index = tensor([[0,2], [1,0]])
oup = F.gather(inp, 0, index)
print(oup.numpy())

Outputs:

.. testoutput::

[[1 6]
[3 2]]
>>> inp = Tensor([
... [1,2], [3,4], [5,6],
... ])
>>> index = Tensor([[0,2], [1,0]])
>>> F.gather(inp, 0, index)
Tensor([[1 6]
[3 2]], dtype=int32, device=xpux:0)
"""
input_shape = inp.shape
index_shape = index.shape
@@ -628,26 +524,15 @@ def scatter(inp: Tensor, axis: int, index: Tensor, source: Tensor) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
import megengine.functional as F
from megengine import tensor

inp = tensor(np.zeros(shape=(3,5),dtype=np.float32))
source = tensor([[0.9935,0.9465,0.2256,0.8926,0.4396],[0.7723,0.0718,0.5939,0.357,0.4576]])
index = tensor([[0,2,0,2,1],[2,0,1,1,2]])
oup = F.scatter(inp, 0, index,source)
print(oup.numpy())

Outputs:

.. testoutput::

[[0.9935 0.0718 0.2256 0. 0. ]
[0. 0. 0.5939 0.357 0.4396]
[0.7723 0.9465 0. 0.8926 0.4576]]
>>> import numpy as np
>>> inp = Tensor(np.zeros(shape=(3,5),dtype=np.float32))
>>> source = Tensor([[0.9935,0.9465,0.2256,0.8926,0.4396],[0.7723,0.0718,0.5939,0.357,0.4576]])
>>> index = Tensor([[0,2,0,2,1],[2,0,1,1,2]])
>>> oup = F.scatter(inp, 0, index,source)
>>> oup.numpy()
array([[0.9935, 0.0718, 0.2256, 0. , 0. ],
[0. , 0. , 0.5939, 0.357 , 0.4396],
[0.7723, 0.9465, 0. , 0.8926, 0.4576]], dtype=float32)
"""
input_shape = inp.shape
index_shape = index.shape
@@ -734,25 +619,15 @@ def where(mask: Tensor, x: Tensor, y: Tensor) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[True, False], [False, True]], dtype=np.bool))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
y = tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
out = F.where(mask, x, y)
print(out.numpy())

Outputs:

.. testoutput::

[[1. 6.]
[7. 4.]]
>>> import numpy as np
>>> mask = Tensor(np.array([[True, False], [False, True]], dtype=np.bool))
>>> x = Tensor(np.array([[1, np.inf], [np.nan, 4]],
... dtype=np.float32))
>>> y = Tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
>>> out = F.where(mask, x, y)
>>> out.numpy()
array([[1., 6.],
[7., 4.]], dtype=float32)
"""

if not isinstance(x, Tensor):
@@ -791,23 +666,13 @@ def cond_take(mask: Tensor, x: Tensor) -> Tensor:
x: input tensor from which to take elements.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
v, index = F.cond_take(mask, x)
print(v.numpy(), index.numpy())

Outputs:

.. testoutput::

[1. 4.] [0 3]
>>> import numpy as np
>>> mask = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
>>> x = Tensor(np.array([[1, np.inf], [np.nan, 4]],
... dtype=np.float32))
>>> v, index = F.cond_take(mask, x)
>>> print(v.numpy(), index.numpy())
[1. 4.] [0 3]
"""
if not isinstance(x, (Tensor, SymbolVar)):
raise TypeError("input must be a tensor")
@@ -846,22 +711,11 @@ def transpose(inp: Tensor, pattern: Iterable[int]) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([[1, 1], [0, 0]], dtype=np.int32))
out = F.transpose(x, (1, 0))
print(out.numpy())

Outputs:

.. testoutput::

[[1 0]
[1 0]]
>>> import numpy as np
>>> x = Tensor(np.array([[1, 1], [0, 0]], dtype=np.int32))
>>> F.transpose(x, (1, 0))
Tensor([[1 0]
[1 0]], dtype=int32, device=xpux:0)
"""
return inp.transpose(pattern)

@@ -879,18 +733,16 @@ def reshape(inp: Tensor, target_shape: Iterable[int]) -> Tensor:
an output tensor having the same data type, elements, and underlying element order as `inp` .

Examples:

>>> x = F.arange(12)
>>> x
Tensor([ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11.], device=xpux:0)
>>> F.reshape(x, (3, 4))
Tensor([[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], device=xpux:0)
>>> F.reshape(x, (2, -1))
Tensor([[ 0. 1. 2. 3. 4. 5.]
[ 6. 7. 8. 9. 10. 11.]], device=xpux:0)
>>> x = F.arange(12)
>>> x
Tensor([ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11.], device=xpux:0)
>>> F.reshape(x, (3, 4))
Tensor([[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], device=xpux:0)
>>> F.reshape(x, (2, -1))
Tensor([[ 0. 1. 2. 3. 4. 5.]
[ 6. 7. 8. 9. 10. 11.]], device=xpux:0)
"""
return inp.reshape(target_shape)

@@ -907,27 +759,16 @@ def flatten(inp: Tensor, start_axis: int = 0, end_axis: int = -1) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

inp_shape = (2, 2, 3, 3)
x = tensor(
np.arange(36, dtype=np.int32).reshape(inp_shape),
)
out = F.flatten(x, 2)
print(x.numpy().shape)
print(out.numpy().shape)

Outputs:

.. testoutput::

(2, 2, 3, 3)
(2, 2, 9)
>>> import numpy as np
>>> inp_shape = (2, 2, 3, 3)
>>> x = Tensor(
... np.arange(36, dtype=np.int32).reshape(inp_shape),
... )
>>> out = F.flatten(x, 2)
>>> x.numpy().shape
(2, 2, 3, 3)
>>> out.numpy().shape
(2, 2, 9)
"""
target_shape = tuple(inp.shape[i] for i in range(start_axis)) + (-1,)
if end_axis != -1:
@@ -946,22 +787,11 @@ def expand_dims(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor([1, 2])
out = F.expand_dims(x, 0)
print(out.numpy().shape)

Outputs:

.. testoutput::

(1, 2)
>>> import numpy as np
>>> x = Tensor([1, 2])
>>> out = F.expand_dims(x, 0)
>>> out.numpy().shape
(1, 2)
"""

return expand_dims_cpp(inp, axis)
@@ -978,22 +808,11 @@ def squeeze(inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None) -> Te
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.array([1, 2], dtype=np.int32).reshape(1, 1, 2, 1))
out = F.squeeze(x, 3)
print(out.numpy().shape)

Outputs:

.. testoutput::

(1, 1, 2)
>>> import numpy as np
>>> x = Tensor(np.array([1, 2], dtype=np.int32).reshape(1, 1, 2, 1))
>>> out = F.squeeze(x, 3)
>>> out.numpy().shape
(1, 1, 2)
"""
return squeeze_cpp(inp, axis)

@@ -1017,20 +836,10 @@ def linspace(
generated tensor.

Examples:

.. testcode::

import numpy as np
import megengine.functional as F

a = F.linspace(3, 10, 5)
print(a.numpy())

Outputs:

.. testoutput::

[ 3. 4.75 6.5 8.25 10. ]
>>> import numpy as np
>>> a = F.linspace(3, 10, 5)
>>> a.numpy()
array([ 3. , 4.75, 6.5 , 8.25, 10. ], dtype=float32)
"""
for item in (start, stop, num):
cur_device = getattr(item, "device", None)
@@ -1089,7 +898,6 @@ def arange(
if ``stop - start`` and ``step`` have the same sign, and length 0 otherwise.

Examples:

>>> F.arange(5)
Tensor([0. 1. 2. 3. 4.], device=xpux:0)
>>> F.arange(1, 4)
@@ -1124,25 +932,13 @@ def repeat(inp: Tensor, repeats: int, axis: Optional[int] = None):
output tensor.

Examples:

.. testcode::

import numpy as np
import megengine.functional as F
from megengine import tensor

x = tensor([[1, 2], [3, 4]], np.int32)
y = F.repeat(x, 2, axis=0)
print(y.numpy())

Outputs:

.. testoutput::

[[1 2]
[1 2]
[3 4]
[3 4]]
>>> import numpy as np
>>> x = Tensor([[1, 2], [3, 4]], np.int32)
>>> F.repeat(x, 2, axis=0)
Tensor([[1 2]
[1 2]
[3 4]
[3 4]], dtype=int32, device=xpux:0)
"""
if axis is None:
inp = inp.reshape(-1) # flatten
@@ -1211,25 +1007,13 @@ def tile(inp: Tensor, reps: Iterable[int]):


Examples:

.. testcode::

import numpy as np
import megengine.functional as F
from megengine import tensor

x = tensor([[1, 2], [3, 4]], np.int32)
y = F.tile(x, (2,1))
print(y.numpy())

Outputs:

.. testoutput::

[[1 2]
[3 4]
[1 2]
[3 4]]
>>> import numpy as np
>>> x = Tensor([[1, 2], [3, 4]], np.int32)
>>> F.tile(x, (2,1))
Tensor([[1 2]
[3 4]
[1 2]
[3 4]], dtype=int32, device=xpux:0)
"""
shape = astensor1d(inp.shape, inp, dtype="int32", device=inp.device)
reps = astensor1d(reps, inp, dtype="int32", device=inp.device)
@@ -1263,28 +1047,17 @@ def copy(inp, device=None):
device: destination device.

Examples:

.. testcode::

import numpy as np
import platform
from megengine import tensor
from megengine.device import get_device_count
import megengine.functional as F

x = tensor([1, 2, 3], np.int32)
if 1 == get_device_count("gpu"):
y = F.copy(x, "cpu1")
print(y.numpy())
else:
y = F.copy(x, "xpu1")
print(y.numpy())

Outputs:

.. testoutput::

[1 2 3]
>>> import numpy as np
>>> import platform
>>> from megengine.device import get_device_count
>>> x = Tensor([1, 2, 3], np.int32)
>>> if 1 == get_device_count("gpu"):
... y = F.copy(x, "cpu1")
... print(y.numpy())
... else:
... y = F.copy(x, "xpu1")
... print(y.numpy())
[1 2 3]
"""
if device is None:
return apply(Identity(), inp)[0]
@@ -1309,24 +1082,12 @@ def roll(
Duplicate axes is allowed if it is a tuple. Default: None.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor([[1,2],[3,4],[5,6]], np.int32)
y = F.roll(x, 1, 0)
print(y.numpy())

Outputs:

.. testoutput::

[[5 6]
[1 2]
[3 4]]
>>> import numpy as np
>>> x = Tensor([[1,2],[3,4],[5,6]], np.int32)
>>> F.roll(x, 1, 0)
Tensor([[5 6]
[1 2]
[3 4]], dtype=int32, device=xpux:0)
"""
shp_bak = None
if axis is None:
@@ -1371,22 +1132,10 @@ def cumsum(inp: Tensor, axis: int):
axis: axis along which cumsum is performed.

Examples:

.. testcode::

from megengine import tensor
import megengine.functional as F

x = tensor([[1, 2, 3], [4, 5, 6]], "int32")
y = F.cumsum(x, 1)
print(y.numpy())

Outputs:

.. testoutput::

[[ 1 3 6]
[ 4 9 15]]
>>> x = Tensor([[1, 2, 3], [4, 5, 6]], "int32")
>>> F.cumsum(x, 1)
Tensor([[ 1 3 6]
[ 4 9 15]], dtype=int32, device=xpux:0)
"""
assert isinstance(inp, Tensor), "input of cumsum must be type of Tensor"
assert axis >= 0 and axis < inp.ndim, "input axis {} out of bound".format(axis)


+ 63
- 151
imperative/python/megengine/functional/vision.py View File

@@ -76,22 +76,11 @@ def cvt_color(inp: Tensor, mode: str = ""):


Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.array([[[[-0.58675045, 1.7526233, 0.10702174]]]]).astype(np.float32))
y = F.vision.cvt_color(x, mode="RGB2GRAY")
print(y.numpy())

Outputs:

.. testoutput::

[[[[0.86555195]]]]
>>> import numpy as np
>>> x = mge.tensor(np.array([[[[-0.58675045, 1.7526233, 0.10702174]]]]).astype(np.float32))
>>> y = F.vision.cvt_color(x, mode="RGB2GRAY")
>>> y.numpy()
array([[[[0.86555195]]]], dtype=float32)
"""
mode = mode.upper() if "YCrCb" not in mode else mode
assert mode in builtin.CvtColor.Mode.__dict__, "unspport mode for cvt_color"
@@ -122,25 +111,14 @@ def roi_pooling(
``K, C, output_shape[0], output_shape[1])`` feature of rois.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

np.random.seed(42)
inp = tensor(np.random.randn(1, 1, 128, 128))
rois = tensor(np.random.random((4, 5)))
y = F.vision.roi_pooling(inp, rois, (2, 2))
print(y.numpy()[0].round(decimals=4))

Outputs:

.. testoutput::

[[[-0.1383 -0.1383]
[-0.5035 -0.5035]]]
>>> import numpy as np
>>> np.random.seed(42)
>>> inp = Tensor(np.random.randn(1, 1, 128, 128))
>>> rois = Tensor(np.random.random((4, 5)))
>>> y = F.vision.roi_pooling(inp, rois, (2, 2))
>>> y.numpy()[0].round(decimals=4)
array([[[-0.1383, -0.1383],
[-0.5035, -0.5035]]], dtype=float32)
"""
assert mode.lower() in ["max", "average"], "only max/average mode is supported"
if isinstance(output_shape, int):
@@ -218,25 +196,14 @@ def roi_align(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

np.random.seed(42)
inp = tensor(np.random.randn(1, 1, 128, 128))
rois = tensor(np.random.random((4, 5)))
y = F.vision.roi_align(inp, rois, (2, 2))
print(y.numpy()[0].round(decimals=4))

Outputs:

.. testoutput::

[[[0.175 0.175 ]
[0.1359 0.1359]]]
>>> import numpy as np
>>> np.random.seed(42)
>>> inp = Tensor(np.random.randn(1, 1, 128, 128))
>>> rois = Tensor(np.random.random((4, 5)))
>>> y = F.vision.roi_align(inp, rois, (2, 2))
>>> y.numpy()[0].round(decimals=4)
array([[[0.175 , 0.175 ],
[0.1359, 0.1359]]], dtype=float32)
"""
if inp.dtype != np.float32:
inp = inp.astype(np.float32)
@@ -285,27 +252,15 @@ def nms(
max_output should be specified and should have a valid positive value under tracing.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = np.zeros((100,4))
np.random.seed(42)
x[:,:2] = np.random.rand(100,2)*20
x[:,2:] = np.random.rand(100,2)*20 + 100
scores = tensor(np.random.rand(100))
inp = tensor(x)
result = F.vision.nms(inp, scores, iou_thresh=0.7)
print(result.numpy())

Outputs:

.. testoutput::

[75 69]
>>> import numpy as np
>>> x = np.zeros((100,4))
>>> np.random.seed(42)
>>> x[:,:2] = np.random.rand(100,2)*20
>>> x[:,2:] = np.random.rand(100,2)*20 + 100
>>> scores = Tensor(np.random.rand(100))
>>> inp = Tensor(x)
>>> F.vision.nms(inp, scores, iou_thresh=0.7)
Tensor([75 69], dtype=int32, device=xpux:0)
"""
assert (
boxes.ndim == 2 and boxes.shape[1] == 4
@@ -357,27 +312,17 @@ def remap(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(np.array([[[1., 0.],[0., 1.]],
[[0., 1.],[0., 1.]]],
dtype=np.float32).reshape(map_xy_shape))
out = F.vision.remap(inp, map_xy)
print(out.numpy())

Outputs:

.. testoutput::

[[[[1. 4.]
[4. 4.]]]]
>>> import numpy as np
>>> inp_shape = (1, 1, 4, 4)
>>> inp = Tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
>>> map_xy_shape = (1, 2, 2, 2)
>>> map_xy = Tensor(np.array([[[1., 0.],[0., 1.]],
... [[0., 1.],[0., 1.]]],
... dtype=np.float32).reshape(map_xy_shape))
>>> out = F.vision.remap(inp, map_xy)
>>> out.numpy()
array([[[[1., 4.],
[4., 4.]]]], dtype=float32)
"""
conv_format = _config._get_actual_op_param("NCHW", _config.__conv_format)

@@ -478,29 +423,18 @@ def warp_perspective(
The transformation matrix is the inverse of that used by `cv2.warpPerspective`.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
# M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(np.array([[1., 0., 1.],
[0., 1., 1.],
[0., 0., 1.]], dtype=np.float32).reshape(M_shape))
out = F.vision.warp_perspective(x, M, (2, 2))
print(out.numpy())

Outputs:

.. testoutput::

[[[[ 5. 6.]
[ 9. 10.]]]]
>>> import numpy as np
>>> inp_shape = (1, 1, 4, 4)
>>> x = Tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
>>> M_shape = (1, 3, 3)
>>> # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
>>> M = Tensor(np.array([[1., 0., 1.],
... [0., 1., 1.],
... [0., 0., 1.]], dtype=np.float32).reshape(M_shape))
>>> out = F.vision.warp_perspective(x, M, (2, 2))
>>> out.numpy()
array([[[[ 5., 6.],
[ 9., 10.]]]], dtype=float32)
"""
if inp.dtype == np.float32:
mat = mat.astype("float32")
@@ -547,27 +481,16 @@ def interpolate(
output tensor.

Examples:

.. testcode::

import numpy as np
from megengine import tensor
import megengine.functional as F

x = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(x, [4, 4], align_corners=False)
print(out.numpy())
out2 = F.vision.interpolate(x, scale_factor=2.)
np.testing.assert_allclose(out.numpy(), out2.numpy())

Outputs:

.. testoutput::

[[[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]]]
>>> import numpy as np
>>> x = Tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
>>> out = F.vision.interpolate(x, [4, 4], align_corners=False)
>>> out.numpy()
array([[[[1. , 1.25, 1.75, 2. ],
[1.5 , 1.75, 2.25, 2.5 ],
[2.5 , 2.75, 3.25, 3.5 ],
[3. , 3.25, 3.75, 4. ]]]], dtype=float32)
>>> out2 = F.vision.interpolate(x, scale_factor=2.)
>>> np.testing.assert_allclose(out.numpy(), out2.numpy())
"""
mode = mode.lower()
if mode not in ["bilinear", "linear", "bicubic", "nearest"]:
@@ -693,17 +616,6 @@ def nvof(src: Tensor, precision: int = 1) -> Tensor:
Returns:
output tensor with shape: ``(n, t-1, (h+out_grid_size-1)//out_grid_size, (w+out_grid_size-1)//out_grid_size, c2)``.
By default, out_grid_size = 4. dtype: int16.

.. code-block:: python

import numpy as np
from megengine import tensor
import megengine.functional as F

x = np.random.random_integers(0, 255, (1,2,224,244,4)).astype("uint8")
src = tensor(x)
result = F.nn.nvof(src, precision=1)
print(result.numpy())
"""
assert src.ndim == 5 and src.shape[4] == 4



+ 47
- 122
imperative/python/megengine/module/activation.py View File

@@ -27,24 +27,13 @@ class Softmax(Module):
softmax will be applied along the highest ranked axis.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

data = mge.tensor(np.array([-2,-1,0,1,2]).astype(np.float32))
softmax = M.Softmax()
output = softmax(data)
with np.printoptions(precision=6):
print(output.numpy())

Outputs:

.. testoutput::

[0.011656 0.031685 0.086129 0.234122 0.636409]
>>> import numpy as np
>>> data = mge.tensor(np.array([-2,-1,0,1,2]).astype(np.float32))
>>> softmax = M.Softmax()
>>> output = softmax(data)
>>> with np.printoptions(precision=6):
... print(output.numpy())
[0.011656 0.031685 0.086129 0.234122 0.636409]
"""

def __init__(self, axis=None, **kwargs):
@@ -66,24 +55,13 @@ class Sigmoid(Module):
\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
sigmoid = M.Sigmoid()
output = sigmoid(data)
with np.printoptions(precision=6):
print(output.numpy())

Outputs:

.. testoutput::

[0.119203 0.268941 0.5 0.731059 0.880797]
>>> import numpy as np
>>> data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
>>> sigmoid = M.Sigmoid()
>>> output = sigmoid(data)
>>> with np.printoptions(precision=6):
... print(output.numpy())
[0.119203 0.268941 0.5 0.731059 0.880797]
"""

def forward(self, inputs):
@@ -98,24 +76,13 @@ class SiLU(Module):
\text{SiLU}(x) = \frac{x}{1 + \exp(-x)}

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
silu = M.SiLU()
output = silu(data)
with np.printoptions(precision=6):
print(output.numpy())

Outputs:

.. testoutput::

[-0.238406 -0.268941 0. 0.731059 1.761594]
>>> import numpy as np
>>> data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
>>> silu = M.SiLU()
>>> output = silu(data)
>>> with np.printoptions(precision=6):
... print(output.numpy())
[-0.238406 -0.268941 0. 0.731059 1.761594]
"""

def forward(self, inputs):
@@ -131,24 +98,13 @@ class GELU(Module):
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
gelu = M.GELU()
output = gelu(data)
with np.printoptions(precision=4):
print(output.numpy())

Outputs:

.. testoutput::

[-0.0455 -0.1587 0. 0.8413 1.9545]
>>> import numpy as np
>>> data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
>>> gelu = M.GELU()
>>> output = gelu(data)
>>> with np.printoptions(precision=4):
... print(output.numpy())
[-0.0455 -0.1587 0. 0.8413 1.9545]
"""

def forward(self, inputs):
@@ -162,23 +118,13 @@ class ReLU(Module):
\text{ReLU}(x) = \max(x, 0)

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M
data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
relu = M.ReLU()
output = relu(data)
with np.printoptions(precision=6):
print(output.numpy())

Outputs:

.. testoutput::

[0. 0. 0. 1. 2.]
>>> import numpy as np
>>> data = mge.tensor(np.array([-2,-1,0,1,2,]).astype(np.float32))
>>> relu = M.ReLU()
>>> output = relu(data)
>>> with np.printoptions(precision=6):
... print(output.numpy())
[0. 0. 0. 1. 2.]
"""

def forward(self, x):
@@ -209,22 +155,12 @@ class PReLU(Module):
init: the initial value of :math:`a`. Default: 0.25

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M
data = mge.tensor(np.array([-1.2, -3.7, 2.7]).astype(np.float32))
prelu = M.PReLU()
output = prelu(data)
print(output.numpy())

Outputs:

.. testoutput::

[-0.3 -0.925 2.7 ]
>>> import numpy as np
>>> data = mge.tensor(np.array([-1.2, -3.7, 2.7]).astype(np.float32))
>>> prelu = M.PReLU()
>>> output = prelu(data)
>>> output.numpy()
array([-0.3 , -0.925, 2.7 ], dtype=float32)
"""

def __init__(self, num_parameters: int = 1, init: float = 0.25, **kwargs):
@@ -258,23 +194,12 @@ class LeakyReLU(Module):
\end{cases}

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M
data = mge.tensor(np.array([-8, -12, 6, 10]).astype(np.float32))

leakyrelu = M.LeakyReLU(0.01)
output = leakyrelu(data)
print(output.numpy())

Outputs:

.. testoutput::

[-0.08 -0.12 6. 10. ]
>>> import numpy as np
>>> data = mge.tensor(np.array([-8, -12, 6, 10]).astype(np.float32))
>>> leakyrelu = M.LeakyReLU(0.01)
>>> output = leakyrelu(data)
>>> output.numpy()
array([-0.08, -0.12, 6. , 10. ], dtype=float32)
"""

def __init__(self, negative_slope: float = 0.01, **kwargs):


+ 14
- 38
imperative/python/megengine/module/adaptive_pooling.py View File

@@ -45,25 +45,13 @@ class AdaptiveMaxPool2d(_AdaptivePoolNd):
* kernel_size: (IH - (OH - 1) * stride_h, IW - (OW - 1) * stride_w)

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.AdaptiveMaxPool2d((2, 2))
inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
oup = m(inp)
print(oup.numpy())

Outputs:

.. testoutput::

[[[[ 5. 7.]
[13. 15.]]]]

>>> import numpy as np
>>> m = M.AdaptiveMaxPool2d((2, 2))
>>> inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
>>> oup = m(inp)
>>> oup.numpy()
array([[[[ 5., 7.],
[13., 15.]]]], dtype=float32)
"""

def forward(self, inp):
@@ -89,25 +77,13 @@ class AdaptiveAvgPool2d(_AdaptivePoolNd):
* kernel_size: (IH - (OH - 1) * stride_h, IW - (OW - 1) * stride_w)

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.AdaptiveAvgPool2d((2, 2))
inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
oup = m(inp)
print(oup.numpy())

Outputs:

.. testoutput::

[[[[ 2.5 4.5]
[10.5 12.5]]]]

>>> import numpy as np
>>> m = M.AdaptiveAvgPool2d((2, 2))
>>> inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
>>> oup = m(inp)
>>> oup.numpy()
array([[[[ 2.5, 4.5],
[10.5, 12.5]]]], dtype=float32)
"""

def forward(self, inp):


+ 12
- 23
imperative/python/megengine/module/batchnorm.py View File

@@ -296,29 +296,18 @@ class BatchNorm2d(_BatchNorm):
Default: False

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

# With Learnable Parameters
m = M.BatchNorm2d(4)
inp = mge.tensor(np.random.rand(1, 4, 3, 3).astype("float32"))
oup = m(inp)
print(m.weight.numpy().flatten(), m.bias.numpy().flatten())
# Without Learnable Parameters
m = M.BatchNorm2d(4, affine=False)
oup = m(inp)
print(m.weight, m.bias)

Outputs:

.. testoutput::

[1. 1. 1. 1.] [0. 0. 0. 0.]
None None
>>> import numpy as np
>>> # With Learnable Parameters
>>> m = M.BatchNorm2d(4)
>>> inp = mge.tensor(np.random.rand(1, 4, 3, 3).astype("float32"))
>>> oup = m(inp)
>>> print(m.weight.numpy().flatten(), m.bias.numpy().flatten())
[1. 1. 1. 1.] [0. 0. 0. 0.]
>>> # Without Learnable Parameters
>>> m = M.BatchNorm2d(4, affine=False)
>>> oup = m(inp)
>>> print(m.weight, m.bias)
None None
"""

def _check_input_ndim(self, inp):


+ 18
- 51
imperative/python/megengine/module/conv.py View File

@@ -149,23 +149,12 @@ class Conv1d(_ConvNd):
* ``bias`` usually has shape ``(1, out_channels, 1)``

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.Conv1d(in_channels=3, out_channels=1, kernel_size=3)
inp = mge.tensor(np.arange(0, 24).astype("float32").reshape(2, 3, 4))
oup = m(inp)
print(oup.numpy().shape)

Outputs:

.. testoutput::

(2, 1, 2)
>>> import numpy as np
>>> m = M.Conv1d(in_channels=3, out_channels=1, kernel_size=3)
>>> inp = mge.tensor(np.arange(0, 24).astype("float32").reshape(2, 3, 4))
>>> oup = m(inp)
>>> oup.numpy().shape
(2, 1, 2)
"""

def __init__(
@@ -334,23 +323,12 @@ class Conv2d(_ConvNd):
* ``bias`` usually has shape ``(1, out_channels, *1)``

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
oup = m(inp)
print(oup.numpy().shape)

Outputs:

.. testoutput::

(2, 1, 2, 2)
>>> import numpy as np
>>> m = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
>>> inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
>>> oup = m(inp)
>>> oup.numpy().shape
(2, 1, 2, 2)
"""

def __init__(
@@ -503,23 +481,12 @@ class Conv3d(_ConvNd):
* ``bias`` usually has shape ``(1, out_channels, *1)``

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.Conv3d(in_channels=3, out_channels=1, kernel_size=3)
inp = mge.tensor(np.arange(0, 384).astype("float32").reshape(2, 3, 4, 4, 4))
oup = m(inp)
print(oup.numpy().shape)

Outputs:

.. testoutput::

(2, 1, 2, 2, 2)
>>> import numpy as np
>>> m = M.Conv3d(in_channels=3, out_channels=1, kernel_size=3)
>>> inp = mge.tensor(np.arange(0, 384).astype("float32").reshape(2, 3, 4, 4, 4))
>>> oup = m(inp)
>>> oup.numpy().shape
(2, 1, 2, 2, 2)
"""

def __init__(


+ 17
- 39
imperative/python/megengine/module/embedding.py View File

@@ -32,26 +32,15 @@ class Embedding(Module):
initial_weight: the learnable weights of the module of shape (num_embeddings, embedding_dim).

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M
weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
data = mge.tensor(np.array([(0,0)], dtype=np.int32))

embedding = M.Embedding(1, 5, initial_weight=weight)
output = embedding(data)
with np.printoptions(precision=6):
print(output.numpy())

Outputs:

.. testoutput::

[[[1.2 2.3 3.4 4.5 5.6]
[1.2 2.3 3.4 4.5 5.6]]]
>>> import numpy as np
>>> weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
>>> data = mge.tensor(np.array([(0,0)], dtype=np.int32))
>>> embedding = M.Embedding(1, 5, initial_weight=weight)
>>> output = embedding(data)
>>> with np.printoptions(precision=6):
... print(output.numpy())
[[[1.2 2.3 3.4 4.5 5.6]
[1.2 2.3 3.4 4.5 5.6]]]
"""

def __init__(
@@ -119,25 +108,14 @@ class Embedding(Module):
norm_type: should be set to None, not support Now.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M
weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
data = mge.tensor(np.array([(0,0)], dtype=np.int32))

embedding = M.Embedding.from_pretrained(weight, freeze=False)
output = embedding(data)
print(output.numpy())

Outputs:

.. testoutput::

[[[1.2 2.3 3.4 4.5 5.6]
[1.2 2.3 3.4 4.5 5.6]]]
>>> import numpy as np
>>> weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
>>> data = mge.tensor(np.array([(0,0)], dtype=np.int32))
>>> embedding = M.Embedding.from_pretrained(weight, freeze=False)
>>> output = embedding(data)
>>> output.numpy()
array([[[1.2, 2.3, 3.4, 4.5, 5.6],
[1.2, 2.3, 3.4, 4.5, 5.6]]], dtype=float32)
"""
embeddings_shape = embeddings.shape
embeddings_dim = len(embeddings_shape)


+ 6
- 17
imperative/python/megengine/module/linear.py View File

@@ -31,23 +31,12 @@ class Linear(Module):


Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.Linear(in_features=3, out_features=1)
inp = mge.tensor(np.arange(0, 6).astype("float32").reshape(2, 3))
oup = m(inp)
print(oup.numpy().shape)

Outputs:

.. testoutput::

(2, 1)
>>> import numpy as np
>>> m = M.Linear(in_features=3, out_features=1)
>>> inp = mge.tensor(np.arange(0, 6).astype("float32").reshape(2, 3))
>>> oup = m(inp)
>>> oup.numpy().shape
(2, 1)
"""

def __init__(


+ 11
- 25
imperative/python/megengine/module/lrn.py View File

@@ -23,32 +23,18 @@ class LocalResponseNorm(Module):
beta: hyperparameter beta. The default value is 0.75.

Example:

.. testcode::

from megengine import tensor
import megengine.module as M
import numpy as np

inp = tensor(np.arange(25, dtype=np.float32).reshape(1,1,5,5))
GT = np.array([[[[ 0., 0.999925, 1.9994003, 2.9979765, 3.9952066],
[ 4.9906454, 5.983851, 6.974385, 7.961814, 8.945709 ],
[ 9.925651, 10.90122, 11.872011, 12.837625, 13.7976675],
[14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
[19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])

op = M.LocalResponseNorm(kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
out = op(inp)
np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
print('pass')


Outputs:

.. testoutput::

>>> import numpy as np
>>> inp = Tensor(np.arange(25, dtype=np.float32).reshape(1,1,5,5))
>>> GT = np.array([[[[ 0., 0.999925, 1.9994003, 2.9979765, 3.9952066],
... [ 4.9906454, 5.983851, 6.974385, 7.961814, 8.945709 ],
... [ 9.925651, 10.90122, 11.872011, 12.837625, 13.7976675],
... [14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
... [19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])
>>> op = M.LocalResponseNorm(kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
>>> out = op(inp)
>>> np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
>>> print('pass')
pass

"""

def __init__(


+ 7
- 18
imperative/python/megengine/module/pooling.py View File

@@ -60,24 +60,13 @@ class MaxPool2d(_PoolNd):
padding: implicit zero padding to be added on both sides.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.module as M

m = M.MaxPool2d(kernel_size=3, stride=1, padding=0)
inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
oup = m(inp)
print(oup.numpy())

Outputs:

.. testoutput::

[[[[10. 11.]
[14. 15.]]]]
>>> import numpy as np
>>> m = M.MaxPool2d(kernel_size=3, stride=1, padding=0)
>>> inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
>>> oup = m(inp)
>>> oup.numpy()
array([[[[10., 11.],
[14., 15.]]]], dtype=float32)
"""

def forward(self, inp):


+ 22
- 32
imperative/python/megengine/module/sliding_window.py View File

@@ -32,38 +32,28 @@ class SlidingWindow(Module):
dilation: the dilation of the window. Default: 1

Example:

.. testcode::

from megengine import tensor
import megengine.module as M
import numpy as np

inp = tensor(np.arange(30).reshape(1,1,5,6))
op = M.SlidingWindow(kernel_size=3, padding=1, stride=2, dilation=2)
out = op(inp)
print(out.numpy())

Outputs:

.. testoutput::

[[[[[[ 0 0 0]
[ 0 7 9]
[ 0 19 21]]

[[ 0 0 0]
[ 7 9 11]
[19 21 23]]]


[[[ 0 7 9]
[ 0 19 21]
[ 0 0 0]]

[[ 7 9 11]
[19 21 23]
[ 0 0 0]]]]]]
>>> import numpy as np
>>> inp = Tensor(np.arange(30).reshape(1,1,5,6))
>>> op = M.SlidingWindow(kernel_size=3, padding=1, stride=2, dilation=2)
>>> out = op(inp)
>>> print(out.numpy())
[[[[[[ 0 0 0]
[ 0 7 9]
[ 0 19 21]]
<BLANKLINE>
[[ 0 0 0]
[ 7 9 11]
[19 21 23]]]
<BLANKLINE>
<BLANKLINE>
[[[ 0 7 9]
[ 0 19 21]
[ 0 0 0]]
<BLANKLINE>
[[ 7 9 11]
[19 21 23]
[ 0 0 0]]]]]]
"""

def __init__(


+ 106
- 206
imperative/python/megengine/random/rng.py View File

@@ -241,22 +241,12 @@ class RNG:


Examples:

.. testcode::

import megengine.random as rand
rng = rand.RNG(seed=100)
x = rng.uniform(size=(2, 2))
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[0.84811664 0.6147553 ]
[0.59429836 0.64727545]]

>>> import megengine.random as rand
>>> rng = rand.RNG(seed=100)
>>> x = rng.uniform(size=(2, 2))
>>> x.numpy() # doctest: +SKIP
array([[0.84811664, 0.6147553 ],
[0.59429836, 0.64727545]], dtype=float32)
"""

def __init__(self, seed: int = None, device: str = None):
@@ -283,22 +273,11 @@ class RNG:
the output tensor.

Examples:

.. testcode::

import megengine as mge
import megengine.random as rand

x = rand.uniform(size=(2, 2))
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[0.91600335 0.6680226 ]
[0.2046729 0.2769141 ]]
>>> import megengine.random as rand
>>> x = rand.uniform(size=(2, 2))
>>> x.numpy() # doctest: +SKIP
array([[0.28603864, 0.3156649 ],
[0.42066026, 0.9805052 ]], dtype=float32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
return _uniform(
@@ -325,22 +304,11 @@ class RNG:
the output tensor.

Examples:

.. testcode::

import megengine as mge
import megengine.random as rand

x = rand.normal(mean=0, std=1, size=(2, 2))
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[-1.4010863 -0.9874344 ]
[ 0.56373274 0.79656655]]
>>> import megengine.random as rand
>>> x = rand.normal(mean=0, std=1, size=(2, 2))
>>> x.numpy() # doctest: +SKIP
array([[ 1.5534291 , -0.28356555],
[ 2.2230418 , -0.92425716]], dtype=float32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
return _normal(
@@ -386,40 +354,25 @@ class RNG:
the output tensor.

Examples:

.. testcode::

import megengine as mge
import megengine.random as rand

x = rand.gamma(shape=2, scale=1, size=(2, 2))
print(x.numpy())

shape = mge.Tensor([[ 1],
[10]], dtype="float32")
scale = mge.Tensor([1,5], dtype="float32")

x = rand.gamma(shape=shape, scale=scale)
print(x.numpy())

x = rand.gamma(shape=shape, scale=scale, size=2)
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[1.5064533 4.0689363 ]
[0.71639484 1.4551026 ]]

[[ 0.4352188 11.399335 ]
[ 9.1888 52.009277 ]]

[[[ 1.1726005 3.9654975 ]
[13.656933 36.559006 ]]
[[ 0.25848487 2.5540342 ]
[11.960409 21.031536 ]]]
>>> import megengine.random as rand
>>> x = rand.gamma(shape=2, scale=1, size=(2, 2))
>>> x.numpy() # doctest: +SKIP
array([[0.97447544, 1.5668875 ],
[1.0069491 , 0.3078318 ]], dtype=float32)
>>> shape = mge.Tensor([[ 1],
... [10]], dtype="float32")
>>> scale = mge.Tensor([1,5], dtype="float32")
>>> x = rand.gamma(shape=shape, scale=scale)
>>> x.numpy() # doctest: +SKIP
array([[ 0.11312152, 3.0799196 ],
[10.973469 , 29.596972 ]], dtype=float32)
>>> x = rand.gamma(shape=shape, scale=scale, size=2)
>>> x.numpy() # doctest: +SKIP
array([[[4.35868073e+00, 1.22415285e+01],
[1.02696848e+01, 4.19773598e+01]],

[[7.73875117e-02, 6.06766164e-01],
[1.22881927e+01, 8.13445740e+01]]], dtype=float32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
return _gamma(
@@ -458,40 +411,25 @@ class RNG:
the output tensor.

Examples:

.. testcode::

import megengine as mge
import megengine.random as rand

x = rand.beta(alpha=2, beta=1, size=(2, 2))
print(x.numpy())

alpha = mge.Tensor([[0.5],
[ 3]], dtype="float32")
beta = mge.Tensor([0.5,5], dtype="float32")

x = rand.beta(alpha=alpha, beta=beta)
print(x.numpy())

x = rand.beta(alpha=alpha, beta=beta, size=2)
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[0.582565 0.91763186]
[0.86963767 0.6088103 ]]

[[0.41503012 0.16438372]
[0.90159506 0.47588003]]

[[[0.55195075 0.01111084]
[0.95298755 0.25048104]]
[[0.11680304 0.13859665]
[0.997879 0.43259275]]]
>>> import megengine.random as rand
>>> x = rand.beta(alpha=2, beta=1, size=(2, 2))
>>> x.numpy() # doctest: +SKIP
array([[0.6172312 , 0.9789006 ],
[0.50004643, 0.9775796 ]], dtype=float32)
>>> alpha = mge.Tensor([[0.5],
... [ 3]], dtype="float32")
>>> beta = mge.Tensor([0.5,5], dtype="float32")
>>> x = rand.beta(alpha=alpha, beta=beta)
>>> x.numpy() # doctest: +SKIP
array([[0.0075407 , 0.1275094 ],
[0.96331763, 0.22299217]], dtype=float32)
>>> x = rand.beta(alpha=alpha, beta=beta, size=2)
>>> x.numpy() # doctest: +SKIP
array([[[0.46863747, 0.13819647],
[0.8646759 , 0.16014215]],

[[0.0682759 , 0.04448463],
[0.97733796, 0.19206746]]], dtype=float32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
return _beta(alpha=alpha, beta=beta, size=size, seed=_seed, handle=self._handle)
@@ -519,40 +457,26 @@ class RNG:


Examples:

.. testcode::

import megengine as mge
import megengine.random as rand

x = rand.poisson(lam=2., size=(1, 3))
print(x.numpy())

lam = mge.Tensor([[1.,1.],
[10,10]], dtype="float32")

x = rand.poisson(lam=lam)
print(x.numpy())

x = rand.poisson(lam=lam, size=(1,3))
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[[3. 1. 3.]]

[[ 2. 2.]
[12. 11.]]

[[[[ 1. 1.]
[11. 4.]]
[[ 0. 0.]
[ 9. 13.]]
[[ 0. 1.]
[ 7. 12.]]]]
>>> import megengine.random as rand
>>> x = rand.poisson(lam=2., size=(1, 3))
>>> x.numpy() # doctest: +SKIP
array([[1., 2., 2.]], dtype=float32)
>>> lam = mge.Tensor([[1.,1.],
... [10,10]], dtype="float32")
>>> x = rand.poisson(lam=lam)
>>> x.numpy() # doctest: +SKIP
array([[ 1., 2.],
[11., 11.]], dtype=float32)
>>> x = rand.poisson(lam=lam, size=(1,3))
>>> x.numpy() # doctest: +SKIP
array([[[[ 2., 1.],
[10., 8.]],

[[ 5., 2.],
[10., 10.]],

[[ 1., 2.],
[ 8., 10.]]]], dtype=float32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
return _poisson(lam=lam, size=size, seed=_seed, handle=self._handle)
@@ -571,36 +495,23 @@ class RNG:
the output tensor.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.random as rand

x = rand.permutation(10, dtype="int32")
print(x.numpy())

x = rand.permutation(10, dtype="float32")
print(x.numpy())

x = mge.tensor(np.arange(18)).reshape(6,3)
x = rand.permutation(x)
print(x.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[4 5 0 7 3 8 6 1 9 2]
[3. 4. 9. 0. 6. 8. 7. 1. 5. 2.]
[[12 13 14]
[ 3 4 5]
[15 16 17]
[ 0 1 2]
[ 9 10 11]
[ 6 7 8]]
>>> import numpy as np
>>> import megengine.random as rand
>>> x = rand.permutation(10, dtype="int32")
>>> x.numpy() # doctest: +SKIP
array([8, 4, 0, 3, 5, 6, 2, 1, 7, 9], dtype=int32)
>>> x = rand.permutation(10, dtype="float32")
>>> x.numpy() # doctest: +SKIP
array([1., 3., 0., 2., 4., 8., 7., 9., 6., 5.], dtype=float32)
>>> x = mge.tensor(np.arange(18)).reshape(6,3)
>>> x = rand.permutation(x)
>>> x.numpy() # doctest: +SKIP
array([[15, 16, 17],
[ 6, 7, 8],
[ 0, 1, 2],
[ 3, 4, 5],
[12, 13, 14],
[ 9, 10, 11]], dtype=int32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
if isinstance(n, int):
@@ -619,32 +530,21 @@ class RNG:
inp: input tensor.

Examples:

.. testcode::

import numpy as np
import megengine as mge
import megengine.random as rand

x = mge.tensor(np.arange(10))
rand.shuffle(x)
print(x.numpy())
y = mge.tensor(np.arange(18)).reshape(6,3)
rand.shuffle(y)
print(y.numpy())

Outputs:

.. testoutput::
:options: +SKIP

[7 9 3 0 8 2 4 5 6 1]
[[12. 13. 14.]
[ 3. 4. 5.]
[15. 16. 17.]
[ 0. 1. 2.]
[ 9. 10. 11.]
[ 6. 7. 8.]]
>>> import numpy as np
>>> import megengine.random as rand
>>> x = mge.tensor(np.arange(10))
>>> rand.shuffle(x)
>>> x.numpy() # doctest: +SKIP
array([4, 5, 9, 6, 2, 8, 1, 0, 3, 7], dtype=int32)
>>> y = mge.tensor(np.arange(18)).reshape(6,3)
>>> rand.shuffle(y)
>>> y.numpy() # doctest: +SKIP
array([[ 3, 4, 5],
[ 6, 7, 8],
[15, 16, 17],
[ 0, 1, 2],
[12, 13, 14],
[ 9, 10, 11]], dtype=int32)
"""
_seed = self._seed() if callable(self._seed) else self._seed
inp._reset(_shuffle(inp=inp, seed=_seed, handle=self._handle))


Loading…
Cancel
Save