GitOrigin-RevId: 1d82209c40
tags/v0.3.2
@@ -190,7 +190,6 @@ def sqrt(inp: Tensor) -> Tensor:
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [[0.     1.     1.4142]
          [1.7321 2.     2.2361]]
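For reference, the values in this testoutput are the element-wise square roots of 0..5 in a (2, 3) matrix. A minimal NumPy sketch (NumPy standing in for the MegEngine tensor call, with the input inferred from the printed output) reproduces them:

    import numpy as np

    # The doc output is consistent with sqrt over 0..5 reshaped to (2, 3).
    inp = np.arange(6, dtype=np.float32).reshape(2, 3)

    # Element-wise square root, rounded to the 4 decimals shown in the doc.
    print(np.round(np.sqrt(inp), 4))
    # [[0.     1.     1.4142]
    #  [1.7321 2.     2.2361]]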
@@ -636,7 +636,6 @@ def interpolate(
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [[[[1.   1.25 1.75 2.  ]
            [1.5  1.75 2.25 2.5 ]
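The visible rows match 2x bilinear upsampling of a 2x2 input [[1, 2], [3, 4]] with half-pixel (align_corners=False) sampling; that input is an inference from the printed values, not shown in the hunk. A NumPy sketch of the interpolation rule:

    import numpy as np

    def bilinear_upsample_2x(img: np.ndarray) -> np.ndarray:
        # 2x bilinear upsampling with half-pixel (align_corners=False) mapping.
        h, w = img.shape
        out = np.empty((2 * h, 2 * w))
        for oy in range(2 * h):
            for ox in range(2 * w):
                # Map each output pixel center back into input coordinates.
                sy = np.clip((oy + 0.5) / 2 - 0.5, 0, h - 1)
                sx = np.clip((ox + 0.5) / 2 - 0.5, 0, w - 1)
                y0, x0 = int(sy), int(sx)
                y1, x1 = min(y0 + 1, h - 1), min(x0 + 1, w - 1)
                fy, fx = sy - y0, sx - x0
                top = img[y0, x0] * (1 - fx) + img[y0, x1] * fx
                bot = img[y1, x0] * (1 - fx) + img[y1, x1] * fx
                out[oy, ox] = top * (1 - fy) + bot * fy
        return out

    print(bilinear_upsample_2x(np.array([[1.0, 2.0], [3.0, 4.0]])))
    # First two rows: [1.   1.25 1.75 2.  ] and [1.5  1.75 2.25 2.5 ]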
@@ -39,7 +39,6 @@ def argsort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [1. 2.] [0 1]
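The argsort example is small enough to check by hand: for an input that is already sorted ascending, the values come back unchanged and the indices are 0..n-1. A NumPy equivalent of what the testoutput shows:

    import numpy as np

    inp = np.array([1.0, 2.0])   # already ascending, per the doc output
    idx = np.argsort(inp)        # indices that sort the input
    print(inp[idx], idx)         # [1. 2.] [0 1]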
@@ -93,7 +92,6 @@ def top_k(
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [1. 2. 3. 4. 5.] [7 0 6 1 5]
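The top_k example's input is not visible in this hunk; the array below is a hypothetical 8-element input constructed so that its five smallest values sit at indices 7, 0, 6, 1, 5, matching the printed output. A NumPy sketch of a smallest-k selection:

    import numpy as np

    # Hypothetical input, chosen only to be consistent with the doc output.
    inp = np.array([2.0, 4.0, 8.0, 7.0, 6.0, 5.0, 3.0, 1.0])
    k = 5
    idx = np.argsort(inp)[:k]    # indices of the k smallest entries
    print(inp[idx], idx)         # [1. 2. 3. 4. 5.] [7 0 6 1 5]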
@@ -50,7 +50,6 @@ def accuracy(logits: Tensor, target: Tensor, topk: Union[int, Iterable[int]] = 1
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [0.] [0.375]
     """
@@ -20,7 +20,7 @@ class Softmax(Module):
     .. math::
         \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}

-    It is applied to an n-dimensional input Tensor and rescaling them so that the elements of the
+    It is applied to an n-dimensional input Tensor, rescaling it so that the elements of the
     n-dimensional output Tensor lie in the range of `[0, 1]` and sum to 1.

     :param axis: An axis along which softmax will be applied. By default,
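Since the hunk quotes the softmax formula, a short NumPy sketch of it may help; subtracting the row max before exponentiating is the usual numerically stable variant and does not change the result:

    import numpy as np

    def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
        # exp(x_i) / sum_j exp(x_j); the max-shift cancels in the ratio.
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    out = softmax(np.array([[1.0, 2.0, 3.0]]))
    print(out, out.sum())   # elements lie in [0, 1] and sum to 1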
@@ -137,8 +137,8 @@ class PReLU(Module):
             ax, & \text{ otherwise }
         \end{cases}

-    Here :math:`a` is a learnable parameter. When called without arguments, `PReLU()` uses
-    a single paramter :math:`a` across all input channel. If called with `PReLU(num_of_channels)`,
+    Here :math:`a` is a learnable parameter. When called without arguments, `PReLU()` uses
+    a single parameter :math:`a` across all input channels. If called with `PReLU(num_of_channels)`,
     a separate :math:`a` is used for each input channel.

     :param num_parameters: number of :math:`a` to learn, there are only two
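The PReLU definition above is easy to mirror in NumPy; here `a` is a plain scalar rather than a learned parameter, which is the only simplification:

    import numpy as np

    def prelu(x: np.ndarray, a: float) -> np.ndarray:
        # PReLU(x) = x for x >= 0, a * x otherwise; `a` is learned in the module.
        return np.where(x >= 0, x, a * x)

    x = np.array([-2.0, -0.5, 0.0, 3.0])
    print(prelu(x, 0.25))   # [-0.5   -0.125  0.     3.   ]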
@@ -218,7 +218,6 @@ class LeakyReLU(Module):
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [-0.08 -0.12  6.   10.  ]
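The printed values are consistent with a negative slope of 0.01 applied to the input [-8, -12, 6, 10]; that input is inferred, not shown in the hunk. A one-line NumPy check:

    import numpy as np

    x = np.array([-8.0, -12.0, 6.0, 10.0])   # inferred example input
    print(np.where(x >= 0, x, 0.01 * x))     # [-0.08 -0.12  6.   10.  ]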
@@ -21,7 +21,7 @@ class Embedding(Module):
     A simple lookup table that stores embeddings of a fixed dictionary and size.

     This module is often used to store word embeddings and retrieve them using indices.
     The input to the module is a list of indices, and the output is the corresponding word embeddings.
     The indices should be less than num_embeddings.

     :param num_embeddings: size of embedding dictionary.
@@ -138,7 +138,6 @@ class Embedding(Module):
     Outputs:

     .. testoutput::
-        :options: +NUMBER

         [[[1.2 2.3 3.4 4.5 5.6]
           [0.1 1.1 2.1 3.1 4.1]
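The two rows visible in the Embedding testoutput behave like a lookup into a 2-row weight table. A NumPy sketch with a hypothetical dictionary whose rows match those values (the indexing order `[1, 0]` is likewise assumed):

    import numpy as np

    # Hypothetical (num_embeddings=2, embedding_dim=5) weight table whose
    # rows match the values visible in the doc output.
    weight = np.array([[0.1, 1.1, 2.1, 3.1, 4.1],
                       [1.2, 2.3, 3.4, 4.5, 5.6]])

    indices = np.array([[1, 0]])   # each index must be < num_embeddings
    print(weight[indices])         # row 1 first, then row 0
    # [[[1.2 2.3 3.4 4.5 5.6]
    #   [0.1 1.1 2.1 3.1 4.1]]]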