
loss.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np

from ..core.tensor.utils import make_shape_tuple
from ..tensor import Tensor
from .elemwise import abs, equal, exp, log, maximum, pow, relu
from .nn import indexing_one_hot, logsigmoid, logsumexp
from .tensor import where

__all__ = [
    "l1_loss",
    "square_loss",
    "cross_entropy",
    "binary_cross_entropy",
    "hinge_loss",
]

def l1_loss(pred: Tensor, label: Tensor) -> Tensor:
    r"""
    Calculates the mean absolute error (MAE) between
    each element in the pred :math:`x` and label :math:`y`.

    The mean absolute error can be described as:

    .. math:: \ell(x, y) = mean\left(L\right)

    where

    .. math::

        L = \{l_1, \dots, l_N\}, \quad
        l_n = \left|x_n - y_n\right|,

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`N` elements each. :math:`N` is the batch size.

    :param pred: predicted result from model.
    :param label: ground truth to compare.
    :return: loss value.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.functional as F

        ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
        tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
        loss = F.nn.l1_loss(ipt, tgt)
        print(loss.numpy())

    Outputs:

    .. testoutput::

        [2.75]

    """
    diff = pred - label
    return abs(diff).mean()

def square_loss(pred: Tensor, label: Tensor) -> Tensor:
    r"""
    Calculates the mean squared error (squared L2 norm) between
    each element in the pred :math:`x` and label :math:`y`.

    The mean squared error can be described as:

    .. math:: \ell(x, y) = mean\left(L\right)

    where

    .. math::

        L = \{l_1, \dots, l_N\}, \quad
        l_n = \left(x_n - y_n\right)^2,

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`N` elements each. :math:`N` is the batch size.

    :param pred: predicted result from model.
    :param label: ground truth to compare.
    :return: loss value.

    Shape:
        - pred: :math:`(N, *)` where :math:`*` means any number of additional
          dimensions.
        - label: :math:`(N, *)`. Same shape as ``pred``.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.functional as F

        ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
        tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
        loss = F.nn.square_loss(ipt, tgt)
        print(loss.numpy())

    Outputs:

    .. testoutput::

        [9.75]

    """
    diff = pred - label
    return (diff ** 2).mean()

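# Usage sketch (not part of the original module): a quick sanity check that
# l1_loss and square_loss are plain means over all elements, verified against
# NumPy references on the same toy inputs as the docstrings above. The
# function name _check_l1_square_loss is hypothetical.
def _check_l1_square_loss():
    import numpy as np
    import megengine as mge
    import megengine.functional as F

    ipt = np.array([3, 3, 3, 3], dtype=np.float32)
    tgt = np.array([2, 8, 6, 1], dtype=np.float32)

    mae = F.nn.l1_loss(mge.tensor(ipt), mge.tensor(tgt))
    mse = F.nn.square_loss(mge.tensor(ipt), mge.tensor(tgt))

    # Both reductions should match the NumPy computation exactly.
    assert np.allclose(mae.numpy(), np.abs(ipt - tgt).mean())   # 2.75
    assert np.allclose(mse.numpy(), ((ipt - tgt) ** 2).mean())  # 9.75
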
def cross_entropy(
    pred: Tensor,
    label: Tensor,
    axis: int = 1,
    with_logits: bool = True,
    label_smooth: float = 0,
) -> Tensor:
    r"""
    Computes the multi-class cross entropy loss (using logits by default).

    By default (``with_logits`` is True), ``pred`` is assumed to be logits,
    and class probabilities are given by softmax. This has better numerical
    stability than a sequential call to :func:`~.softmax` followed by
    :func:`~.cross_entropy`.

    When using label smoothing, the label distribution is as follows:

    .. math:: y^{LS}_{k} = y_{k}\left(1 - \alpha\right) + \alpha / K

    where :math:`y^{LS}` and :math:`y` are the new and original label
    distributions respectively, :math:`k` is the index into the label
    distribution, :math:`\alpha` is ``label_smooth`` and :math:`K` is the
    number of classes.

    :param pred: input tensor representing the predicted probability.
    :param label: input tensor representing the classification label.
    :param axis: an axis along which softmax will be applied. Default: 1
    :param with_logits: whether to apply softmax first. Default: True
    :param label_smooth: a label smoothing parameter that re-distributes the
        target distribution. Default: 0
    :return: loss value.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        data_shape = (1, 2)
        label_shape = (1, )
        pred = tensor(np.array([0, 0], dtype=np.float32).reshape(data_shape))
        label = tensor(np.ones(label_shape, dtype=np.int32))
        loss = F.nn.cross_entropy(pred, label)
        print(loss.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [0.6931]

    """
    n0 = pred.ndim
    n1 = label.ndim
    assert n0 == n1 + 1, (
        "target ndim must be one less than input ndim; input_ndim={} "
        "target_ndim={}".format(n0, n1)
    )

    ls = label_smooth

    if with_logits:
        logZ = logsumexp(pred, axis).mean()
        primary_term = indexing_one_hot(pred, label, axis).mean()
    else:
        logZ = 0
        primary_term = log(indexing_one_hot(pred, label, axis)).mean()
    if ls is None or (type(ls) in (int, float) and ls == 0):
        return logZ - primary_term
    if not with_logits:
        pred = log(pred)
    return logZ - ls * pred.mean() - (1 - ls) * primary_term

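# Usage sketch (not part of the original module): how label_smooth changes the
# result. With alpha = 0.1 and K = 2 classes, the target distribution becomes
# (0.05, 0.95) instead of (0, 1), so some loss is also charged for the
# off-target logit. The function name _demo_label_smoothing is hypothetical.
def _demo_label_smoothing():
    import numpy as np
    import megengine as mge
    import megengine.functional as F

    logits = mge.tensor(np.array([[1.0, 3.0]], dtype=np.float32))
    label = mge.tensor(np.array([1], dtype=np.int32))

    plain = F.nn.cross_entropy(logits, label)
    smoothed = F.nn.cross_entropy(logits, label, label_smooth=0.1)

    print(plain.numpy().round(4))     # [0.1269] == logsumexp([1, 3]) - 3
    print(smoothed.numpy().round(4))  # [0.2269] == logZ - 0.95*3 - 0.05*1
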
def binary_cross_entropy(
    pred: Tensor, label: Tensor, with_logits: bool = True
) -> Tensor:
    r"""
    Computes the binary cross entropy loss (using logits by default).

    By default (``with_logits`` is True), ``pred`` is assumed to be logits,
    and class probabilities are given by sigmoid.

    :param pred: `(N, *)`, where `*` means any number of additional dimensions.
    :param label: `(N, *)`, same shape as the input.
    :param with_logits: bool, whether to apply sigmoid first. Default: True
    :return: loss value.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        pred = tensor(np.array([0, 0], dtype=np.float32).reshape(1, 2))
        label = tensor(np.ones((1, 2), dtype=np.float32))
        loss = F.nn.binary_cross_entropy(pred, label)
        print(loss.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [0.6931]

    """
    if not with_logits:
        return -(label * log(pred) + (1 - label) * log(1 - pred)).mean()
    # logsigmoid(pred) and logsigmoid(-pred) share a common sub-expression;
    # hopefully the backend will optimize this.
    return -(label * logsigmoid(pred) + (1 - label) * logsigmoid(-pred)).mean()

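# Usage sketch (not part of the original module): why the logits path matters
# numerically. For a large logit, sigmoid saturates to exactly 1.0 in float32,
# so the probability form hits log(1 - 1.0) = log(0), while the logsigmoid
# form stays finite. The function name _demo_bce_stability is hypothetical.
def _demo_bce_stability():
    import numpy as np
    import megengine as mge
    import megengine.functional as F

    logit = mge.tensor(np.array([[30.0]], dtype=np.float32))
    wrong_label = mge.tensor(np.zeros((1, 1), dtype=np.float32))

    stable = F.nn.binary_cross_entropy(logit, wrong_label)
    print(stable.numpy())  # ~[30.] -- finite; -logsigmoid(-30) is about 30

    prob = F.sigmoid(logit)  # rounds to exactly 1.0 in float32
    naive = F.nn.binary_cross_entropy(prob, wrong_label, with_logits=False)
    print(naive.numpy())  # non-finite -- log(1 - 1.0) blows up
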
def hinge_loss(pred: Tensor, label: Tensor, norm: str = "L1") -> Tensor:
    r"""
    Calculates the hinge loss which is often used in SVMs.

    The hinge loss can be described as:

    .. math:: loss(x, y) = \frac{1}{N}\sum_i\sum_j\max(0, 1 - x_{ij} y_{ij})

    :param pred: input tensor representing the predicted probability, shape is `(N, C)`.
    :param label: input tensor representing the binary classification label, shape is `(N, C)`.
    :param norm: specify the norm to calculate the loss, should be "L1" or "L2".
    :return: loss value.

    Examples:

    .. testcode::

        from megengine import tensor
        import megengine.functional as F

        pred = tensor([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype="float32")
        label = tensor([[1, -1, -1], [-1, 1, 1]], dtype="float32")
        loss = F.nn.hinge_loss(pred, label)
        print(loss.numpy())

    Outputs:

    .. testoutput::

        [1.5]

    """
    assert norm in ["L1", "L2"], "norm must be L1 or L2"
    # label is expected to contain -1/1 values; the per-element hinge term
    # is max(0, 1 - pred * label).
    loss = relu(1.0 - pred * label)
    if norm == "L1":
        return loss.sum(axis=1).mean()
    else:
        return (loss ** 2).sum(axis=1).mean()

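# Usage sketch (not part of the original module): comparing the "L1" and "L2"
# norms on the docstring's inputs. The per-element hinge terms are
# [0.5, 0.5, 1.1] and [0.4, 0.3, 0.2]; "L1" sums them per row, "L2" sums
# their squares per row, and both then average over rows. The function name
# _demo_hinge_norms is hypothetical.
def _demo_hinge_norms():
    from megengine import tensor
    import megengine.functional as F

    pred = tensor([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype="float32")
    label = tensor([[1, -1, -1], [-1, 1, 1]], dtype="float32")

    l1 = F.nn.hinge_loss(pred, label, norm="L1")  # (2.1 + 0.9) / 2 = 1.5
    l2 = F.nn.hinge_loss(pred, label, norm="L2")  # (1.71 + 0.29) / 2 = 1.0
    print(l1.numpy(), l2.numpy())  # [1.5] [1.]
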

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU/GPU build to choose between. If you want to run GPU programs, make sure the machine has a GPU and that the driver is installed. If you would like to try deep learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
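The snippet below is a minimal sketch for checking at runtime whether MegEngine can see a usable GPU; it assumes ``megengine.is_cuda_available()`` is present in the installed release. If it returns False, the same code still runs on CPU.

    import megengine as mge

    # Reports whether MegEngine detects a usable CUDA device.
    if mge.is_cuda_available():
        print("Running on GPU")
    else:
        print("Falling back to CPU")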