|
@@ -69,4 +69,12 @@ def accuracy(logits: Tensor, target: Tensor, topk: Union[int, Iterable[int]] = 1


@wrap_io_tensor
def zero_grad(inp: Tensor) -> Tensor:
r""" |
|
|
|
|
|
Returns a tensor which is treated as constant during backward gradient calcuation, |
|
|
|
|
|
i.e. its gradient is zero. |
|
|
|
|
|
|
|
|
|
|
|
:param inp: Input tensor. |
|
|
|
|
|
|
|
|
|
|
|
See implementation of :func:`~.softmax` for example. |
|
|
|
|
|
""" |
|
|
return mgb.opr.zero_grad(inp) |
|
|
return mgb.opr.zero_grad(inp) |
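For context, a minimal sketch of how zero_grad can serve the :func:`~.softmax` implementation the docstring points to. This is an illustration, not code from the patch: it assumes the same module exposes an elementwise exp and that Tensor provides numpy-style max/sum reductions with a keepdims flag.

def softmax(inp: Tensor, axis: int = -1) -> Tensor:
    # Subtract the per-axis max for numerical stability. Wrapping the
    # offset in zero_grad treats it as a constant in backward, so no
    # gradient flows through the max.
    offset = zero_grad(inp.max(axis=axis, keepdims=True))
    cached = exp(inp - offset)
    # Normalize. Softmax is invariant to a constant shift of its input,
    # so treating the offset as constant leaves the gradient unchanged.
    return cached / cached.sum(axis=axis, keepdims=True)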