
docs(mge): fix doctest

GitOrigin-RevId: 131fed8733
tags/v1.0.0-rc1
Megvii Engine Team 4 years ago
parent commit 6c1dbd40d9
11 changed files with 108 additions and 64 deletions
  1. +1  -1   imperative/python/megengine/core/tensor/function.py
  2. +2  -2   imperative/python/megengine/data/transform/vision/transform.py
  3. +7  -5   imperative/python/megengine/functional/elemwise.py
  4. +24 -4   imperative/python/megengine/functional/math.py
  5. +33 -16  imperative/python/megengine/functional/nn.py
  6. +17 -7   imperative/python/megengine/functional/tensor.py
  7. +0  -13  imperative/python/megengine/functional/utils.py
  8. +1  -1   imperative/python/megengine/module/activation.py
  9. +5  -3   imperative/python/megengine/module/batchnorm.py
  10. +12 -10 imperative/python/megengine/module/sequential.py
  11. +6  -2  imperative/python/megengine/random/distribution.py

+1 -1  imperative/python/megengine/core/tensor/function.py

@@ -31,7 +31,7 @@ class Function:
         self.y = y
         return y

-    def backward(self. output_grads):
+    def backward(self, output_grads):
         y = self.y
         return output_grads * y * (1-y)
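For context, this hunk sits inside the docstring example of subclassing `Function`. A minimal self-contained sketch of that example (the import path follows the file name above; the sigmoid forward is an assumption chosen to match the `y * (1 - y)` gradient shown in the hunk):

    import megengine.functional as F
    from megengine.core.tensor.function import Function

    class Sigmoid(Function):
        def forward(self, x):
            # sigmoid saves its output; its gradient is y * (1 - y)
            y = 1 / (1 + F.exp(-x))
            self.y = y
            return y

        def backward(self, output_grads):
            y = self.y
            return output_grads * y * (1 - y)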




+2 -2  imperative/python/megengine/data/transform/vision/transform.py

@@ -194,9 +194,9 @@ class Compose(VisionTransform):
     will be random shuffled, the 2nd and 4th transform will also be shuffled.
     :param order: The same with :class:`VisionTransform`

-    Example:
+    Examples:

-    ..testcode::
+    .. testcode::

         from megengine.data.transform import RandomHorizontalFlip, RandomVerticalFlip, CenterCrop, ToMode, Compose
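The shuffling described in the context lines is driven by a constructor argument; a hedged sketch of how such a pipeline might be assembled (the `shuffle_indices` parameter name and its 1-indexed group format are assumptions, not shown in this hunk):

    from megengine.data.transform import (
        RandomHorizontalFlip, RandomVerticalFlip, CenterCrop, ToMode, Compose,
    )

    # shuffle_indices=[(1, 2)] would randomize the order of the 1st and 2nd
    # transforms on every call (assumed parameter name and semantics)
    transform = Compose(
        [RandomHorizontalFlip(), RandomVerticalFlip(), CenterCrop(100), ToMode("CHW")],
        shuffle_indices=[(1, 2)],
    )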




+7 -5  imperative/python/megengine/functional/elemwise.py

@@ -197,8 +197,8 @@ def sqrt(inp: Tensor) -> Tensor:

     .. testoutput::

-        [[0.     1.     1.4142]
-         [1.7321 2.     2.2361 ]]
+        [[0.     1.     1.4142]
+         [1.7321 2.     2.2361]]

     """
     return inp ** 0.5
@@ -227,8 +227,8 @@ def square(inp: Tensor) -> Tensor:

     .. testoutput::

-        [[0.  1.  4.]
-         [9. 16. 25.]]
+        [[ 0.  1.  4.]
+         [ 9. 16. 25.]]

     """
     return inp ** 2
@@ -437,7 +437,7 @@ def clamp(inp: Tensor, lower=None, upper=None) -> Tensor:
     :param lower: lower-bound of the range to be clamped to
     :param upper: upper-bound of the range to be clamped to

-    Example:
+    Examples:

     .. testcode::

@@ -452,6 +452,8 @@ def clamp(inp: Tensor, lower=None, upper=None) -> Tensor:

         print(F.clamp(a, upper=3).numpy())

+    Outputs:
+
     .. testoutput::

         [2 2 2 3 4]
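As a side note, clamp is elementwise min(max(x, lower), upper); a numpy sketch of those semantics (my own illustration, not part of the diff):

    import numpy as np

    x = np.arange(5)                        # [0 1 2 3 4]
    print(np.minimum(np.maximum(x, 2), 4))  # [2 2 2 3 4]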


+24 -4  imperative/python/megengine/functional/math.py

@@ -58,6 +58,8 @@ def isnan(inp: Tensor) -> Tensor:

         print(F.isnan(x).numpy())

+    Outputs:
+
     .. testoutput::

         [False True False]
@@ -83,6 +85,8 @@ def isinf(inp: Tensor) -> Tensor:

         print(F.isinf(x).numpy())

+    Outputs:
+
     .. testoutput::

         [False True False]
@@ -141,7 +145,9 @@ def sum(
         data = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
         out = F.sum(data)
         print(out.numpy())

+    Outputs:
+
     .. testoutput::

         [21]
@@ -208,6 +214,8 @@ def mean(
         out = F.mean(data)
         print(out.numpy())

+    Outputs:
+
     .. testoutput::

         [3.5]
@@ -250,9 +258,11 @@ def var(
         out = F.var(data)
         print(out.numpy())

+    Outputs:
+
     .. testoutput::

-        [2.9166667]
+        [2.9167]

     """
     if axis is None:
         m = mean(inp, axis=axis, keepdims=False)
@@ -288,9 +298,11 @@ def std(
         out = F.std(data, axis=1)
         print(out.numpy())

+    Outputs:
+
     .. testoutput::

-        [0.8164966 0.8164966]
+        [0.8165 0.8165]

     """
     return var(inp, axis=axis, keepdims=keepdims) ** 0.5
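The shorter outputs come from 4-significant-digit print formatting; the underlying values check out in plain numpy (my own verification sketch):

    import numpy as np

    data = np.arange(1, 7).reshape(2, 3)  # [[1 2 3] [4 5 6]]
    print(np.var(data))                   # 2.916666... -> rendered as [2.9167]
    print(np.std(data, axis=1))           # [0.81649658 0.81649658] -> [0.8165 0.8165]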


@@ -354,6 +366,8 @@ def max(
         y = F.max(x)
         print(y.numpy())

+    Outputs:
+
     .. testoutput::

         [6]
@@ -388,9 +402,11 @@ def norm(
         y = F.norm(x)
         print(y.numpy())

+    Outputs:
+
     .. testoutput::

-        [4.358899]
+        [4.3589]

     """
     if p == 0:
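[4.3589] is consistent with the L2 norm of arange(-3, 3) (my assumption about the elided input; sqrt(9 + 4 + 1 + 0 + 1 + 4) = sqrt(19)):

    import numpy as np

    x = np.arange(-3, 3, dtype=np.float32)
    print(np.sqrt((x ** 2).sum()))  # 4.358899 -> rendered as [4.3589]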
@@ -426,6 +442,8 @@ def argmin(
         y = F.argmin(x)
         print(y.numpy())

+    Outputs:
+
     .. testoutput::

         [0]
@@ -479,6 +497,8 @@ def argmax(
         x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
         y = F.argmax(x)
         print(y.numpy())

+    Outputs:
+
     .. testoutput::

         [5]


+33 -16  imperative/python/megengine/functional/nn.py

@@ -372,10 +372,12 @@ def softplus(inp: Tensor) -> Tensor:
         x = tensor(np.arange(-3, 3, dtype=np.float32))
         y = F.softplus(x)
         print(y.numpy())

-    .. output::
+    Outputs:
+
+    .. testoutput::

-        [0.04858735 0.126928   0.3132617  0.6931472  1.3132617  2.126928  ]
+        [0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]

     """
     return log1p(exp(-abs(inp))) + relu(inp)
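The return line uses the overflow-safe softplus identity log(1 + e^x) = log1p(e^(-|x|)) + max(x, 0); a numpy sketch of why the two forms agree (my own illustration):

    import numpy as np

    x = np.arange(-3, 3, dtype=np.float32)
    naive = np.log(1 + np.exp(x))                              # overflows for large x
    stable = np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0)
    print(np.allclose(naive, stable))                          # True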
@@ -411,10 +413,12 @@ def log_softmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
         y = F.log_softmax(x, axis=1)
         print(y.numpy())

-    .. output::
+    Outputs:
+
+    .. testoutput::

-        [[-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
-         [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]]
+        [[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
+         [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]

     """
     return inp - logsumexp(inp, axis, keepdims=True)
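The doctest values are easy to reproduce, assuming the elided input is arange(10).reshape(2, 5): each row's logsumexp sits 4.4519 above the row's first element, so subtracting it yields the same five numbers in both rows (my own numpy check):

    import numpy as np

    x = np.arange(10, dtype=np.float32).reshape(2, 5)
    lse = np.log(np.exp(x).sum(axis=1, keepdims=True))
    print(x - lse)  # both rows: [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]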
@@ -432,6 +436,7 @@ def logsigmoid(inp: Tensor) -> Tensor:
     :param inp: The input tensor

     Examples:
+
     .. testcode::

         import numpy as np
@@ -442,9 +447,12 @@ def logsigmoid(inp: Tensor) -> Tensor:
         y = F.logsigmoid(x)
         print(y.numpy())

-    .. output::
+    Outputs:
+
+    .. testoutput::

-        [-5.0067153  -4.01815    -3.0485873  -2.126928   -1.3132617  -0.6931472  -0.3132617  -0.126928   -0.04858735 -0.01814993]
+        [-5.0067 -4.0181 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
+         -0.0181]

     """
     return -softplus(-inp)
@@ -478,6 +486,7 @@ def logsumexp(
     :param keepdims: whether to retain :attr:`axis` or not for the output tensor.

     Examples:
+
     .. testcode::

         import numpy as np
@@ -488,9 +497,11 @@ def logsumexp(
         y = F.logsumexp(x, axis=1, keepdims=False)
         print(y.numpy())

-    .. output::
+    Outputs:
+
+    .. testoutput::

-        [-0.5480856  4.4519143]
+        [-0.5481  4.4519]

     """
     max_value = max(inp, axis, keepdims=True)
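The implementation starts from the classic max-subtraction trick, logsumexp(x) = m + log(sum(exp(x - m))) with m = max(x), which keeps exp from overflowing; a numpy sketch (my own illustration):

    import numpy as np

    x = np.array([1000.0, 1000.0])
    m = x.max()
    print(m + np.log(np.exp(x - m).sum()))  # 1000.6931...
    print(np.log(np.exp(x).sum()))          # inf: the naive form overflows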
@@ -577,8 +588,9 @@ def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
     Outputs:

     .. testoutput::
-        [[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
-         [0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]]
+
+        [[0.0117 0.0317 0.0861 0.2341 0.6364]
+         [0.0117 0.0317 0.0861 0.2341 0.6364]]

     """
     if axis is None:
@@ -1026,7 +1038,7 @@ def dot(inp1: Tensor, inp2: Tensor) -> Tensor:

     Examples:

-    .. teestcode::
+    .. testcode::

         import numpy as np
         from megengine import tensor
@@ -1039,9 +1051,10 @@ def dot(inp1: Tensor, inp2: Tensor) -> Tensor:

     Outputs:

+    .. testoutput::
+
         [55.]

-    .. testoutputs::
     """
     op = builtin.Dot()
     inp1, inp2 = utils.convert_inputs(inp1, inp2)
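[55.] is what two arange(1, 6) vectors would produce (my assumption about the elided inputs; 1 + 4 + 9 + 16 + 25 = 55):

    import numpy as np

    print(np.dot(np.arange(1, 6), np.arange(1, 6)))  # 55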
@@ -1058,7 +1071,7 @@ def svd(inp: Tensor, full_matrices=False, compute_uv=True) -> Tensor:

     Examples:

-    .. teestcode::
+    .. testcode::

         import numpy as np
         from megengine import tensor
@@ -1070,7 +1083,9 @@ def svd(inp: Tensor, full_matrices=False, compute_uv=True) -> Tensor:

     Outputs:

-        [7.348, 1.]
+    .. testoutput::
+
+        [7.3485 1.    ]

     """
     op = builtin.SVD(full_matrices=full_matrices, compute_uv=compute_uv)
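[7.3485 1.] matches the singular values of arange(6).reshape(2, 3) (my assumption about the elided input; the eigenvalues of A·Aᵀ are 54 and 1, and sqrt(54) ≈ 7.3485):

    import numpy as np

    A = np.arange(6, dtype=np.float32).reshape(2, 3)
    print(np.linalg.svd(A, compute_uv=False))  # [7.3485 1.    ]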
@@ -1445,6 +1460,8 @@ def indexing_one_hot(
         val = F.indexing_one_hot(src, index)
         print(val.numpy())

+    Outputs:
+
     .. testoutput::

         [1.]
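indexing_one_hot picks one element per row according to an index vector; a numpy sketch of the same semantics, consistent with the [1.] output (my own illustration, including the assumed src and index values):

    import numpy as np

    src = np.array([[1.0, 2.0]])
    index = np.array([0])
    print(np.take_along_axis(src, index[:, None], axis=1)[:, 0])  # [1.]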


+17 -7  imperative/python/megengine/functional/tensor.py

@@ -60,7 +60,7 @@ __all__ = [
 ]


-def eye(n: int, *, dtype=None, device: Optional[CompNode] = None) -> Tensor:
+def eye(n: int, *, dtype="float32", device: Optional[CompNode] = None) -> Tensor:
     """
     Returns a 2D tensor with ones on the diagonal and zeros elsewhere.

@@ -80,7 +80,7 @@ def eye(n: int, *, dtype=None, device: Optional[CompNode] = None) -> Tensor:

         data_shape = (4, 6)
         n, m = data_shape
-        out = F.eye(n, m, dtype=np.float32)
+        out = F.eye([n, m], dtype=np.float32)
         print(out.numpy())

     Outputs:
@@ -135,6 +135,8 @@ def zeros_like(inp: Tensor) -> Tensor:
         out = F.zeros_like(inp)
         print(out.numpy())

+    Outputs:
+
     .. testoutput::

         [[0 0 0]
@@ -638,7 +640,7 @@ def cond_take(mask: Tensor, x: Tensor) -> Tensor:

     .. testoutput::

-        Tensor([1. 4.]) Tensor([0 3], dtype=int32)
+        [1. 4.] [0 3]

     """
     if not isinstance(x, (TensorWrapperBase, TensorBase)):
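cond_take returns the selected values together with their indices into the flattened input; a numpy sketch of the semantics behind the [1. 4.] [0 3] output (my own illustration):

    import numpy as np

    x = np.array([[1.0, 2.0], [3.0, 4.0]])
    mask = np.array([[True, False], [False, True]])
    print(x[mask])               # [1. 4.]
    print(np.flatnonzero(mask))  # [0 3]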
@@ -888,6 +890,8 @@ def linspace(
         a = F.linspace(3,10,5)
         print(a.numpy())

+    Outputs:
+
     .. testoutput::

         [ 3. 4.75 6.5 8.25 10. ]
@@ -930,6 +934,8 @@ def arange(

         a = F.arange(5)
         print(a.numpy())

+    Outputs:
+
     .. testoutput::
@@ -977,7 +983,9 @@ def param_pack_split(inp: Tensor, offsets: List, shapes: List) -> Tensor:
         b, c = F.param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
         print(b.numpy())
         print(c.numpy())

+    Outputs:
+
     .. testoutput::

         [1]
@@ -1000,7 +1008,7 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
     :param offsets: device value of offsets
     :param offsets_val: offsets of inputs, length of 2 * n,
                         format [begin0, end0, begin1, end1].
-    :return: split tensors
+    :return: concat tensors

     Examples:

@@ -1013,10 +1021,12 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
         a = tensor(np.ones((1,), np.int32))
         b = tensor(np.ones((3, 3), np.int32))
         offsets_val = [0, 1, 1, 10]
-        offsets = tensor(offsets, np.int32)
+        offsets = tensor(offsets_val, np.int32)
         c = F.param_pack_concat([a, b], offsets, offsets_val)
         print(c.numpy())

+    Outputs:
+
     .. testoutput::

         [1 1 1 1 1 1 1 1 1 1]
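The offsets_val layout is [begin0, end0, begin1, end1, ...] into the packed flat buffer; a numpy sketch of the packing it describes (my own illustration):

    import numpy as np

    a = np.ones((1,), np.int32)
    b = np.ones((3, 3), np.int32)
    offsets_val = [0, 1, 1, 10]  # a occupies [0, 1), b occupies [1, 10)
    out = np.empty(10, np.int32)
    out[0:1] = a
    out[1:10] = b.ravel()
    print(out)                   # [1 1 1 1 1 1 1 1 1 1]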


+0 -13  imperative/python/megengine/functional/utils.py

@@ -63,19 +63,6 @@ def accuracy(
     return accs


-def zero_grad(inp: Tensor) -> Tensor:
-    r"""
-    Returns a tensor which is treated as constant during backward gradient calcuation,
-    i.e. its gradient is zero.
-
-    :param inp: Input tensor.
-
-    See implementation of :func:`~.softmax` for example.
-    """
-    print("zero_grad is obsoleted, please use detach instead")
-    raise NotImplementedError
-
-
 def copy(inp, cn):
     r"""
     Copy tensor to another device.
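As the removed stub itself suggested, `Tensor.detach()` is the replacement: it returns a tensor treated as a constant during backward. A minimal sketch (usage assumed from the message above, not shown in this diff):

    from megengine import tensor

    x = tensor([1.0, 2.0])
    y = x.detach()  # same values as x, but gradients do not flow through y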


+1 -1  imperative/python/megengine/module/activation.py

@@ -219,7 +219,7 @@ class LeakyReLU(Module):

     .. testoutput::

-        [-0.08 -0.12  6.   10.  ]
+        [-0.08 -0.12  6.  10. ]

     """




+5 -3  imperative/python/megengine/module/batchnorm.py

@@ -267,15 +267,17 @@ class BatchNorm2d(_BatchNorm):
         m = M.BatchNorm2d(4)
         inp = mge.tensor(np.random.rand(1, 4, 3, 3).astype("float32"))
         oup = m(inp)
-        print(m.weight, m.bias)
+        print(m.weight.numpy(), m.bias.numpy())
         # Without Learnable Parameters
         m = M.BatchNorm2d(4, affine=False)
         oup = m(inp)
         print(m.weight, m.bias)

+    Outputs:
+
     .. testoutput::

-        Tensor([1. 1. 1. 1.]) Tensor([0. 0. 0. 0.])
+        [1. 1. 1. 1.] [0. 0. 0. 0.]
         None None
     """




+12 -10  imperative/python/megengine/module/sequential.py

@@ -17,23 +17,25 @@ class Sequential(Module):
     Alternatively, an ordered dict of modules can also be passed in.

     To make it easier to understand, here is a small example:

+    Examples:
+
     .. testcode::

         import numpy as np
-        import megengine.nn as nn
-        import megengine.nn.functional as F
+        from megengine import tensor
+        import megengine.functional as F

         batch_size = 64
-        data = nn.Input("data", shape=(batch_size, 1, 28, 28), dtype=np.float32, value=np.zeros((batch_size, 1, 28, 28)))
-        label = nn.Input("label", shape=(batch_size,), dtype=np.int32, value=np.zeros(batch_size,))
+        data = tensor(np.zeros((batch_size, 1, 28, 28)), dtype=np.float32)
+        label = tensor(np.zeros(batch_size,), dtype=np.int32)

         data = data.reshape(batch_size, -1)
-        net = nn.Sequential(
-            nn.Linear(28 * 28, 320),
-            nn.Linear(320, 500),
-            nn.Linear(500, 320),
-            nn.Linear(320, 10)
+        net = M.Sequential(
+            M.Linear(28 * 28, 320),
+            M.Linear(320, 500),
+            M.Linear(500, 320),
+            M.Linear(320, 10)
         )
         pred = net(data)
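Note that the updated doctest calls `M.Sequential` but the visible hunk never imports `M`. A self-contained version of the same example, with the assumed `import megengine.module as M` added:

    import numpy as np
    import megengine.functional as F
    import megengine.module as M  # assumed import, not shown in the hunk
    from megengine import tensor

    batch_size = 64
    data = tensor(np.zeros((batch_size, 1, 28, 28)), dtype=np.float32)

    net = M.Sequential(
        M.Linear(28 * 28, 320),
        M.Linear(320, 500),
        M.Linear(500, 320),
        M.Linear(320, 10),
    )
    pred = net(data.reshape(batch_size, -1))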




+6 -2  imperative/python/megengine/random/distribution.py

@@ -37,7 +37,9 @@ def normal(

         x = rand.normal(mean=0, std=1, size=(2, 2))
         print(x.numpy())

+    Outputs:
+
     .. testoutput::
         :options: +SKIP

@@ -73,7 +75,9 @@ def uniform(

         x = rand.uniform(size=(2, 2))
         print(x.numpy())

+    Outputs:
+
     .. testoutput::
         :options: +SKIP
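Both doctests keep `:options: +SKIP` because the draws are random, so the printed sample can only be illustrative; a numpy sketch of the same kind of non-reproducible output (my own illustration):

    import numpy as np

    print(np.random.normal(0, 1, size=(2, 2)))  # different on every run,
    print(np.random.uniform(size=(2, 2)))       # hence +SKIP in the doctest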



