
init.py 9.3 kB

# -*- coding: utf-8 -*-
import math
from functools import reduce
from typing import Optional, Tuple, Union

import numpy as np

from ..functional import full
from ..random import normal, uniform
from ..tensor import Tensor


def fill_(tensor: Tensor, val: Union[float, int]) -> None:
    """Fills the given ``tensor`` with value ``val``.

    Args:
        tensor: tensor to be initialized.
        val: value to be filled throughout the tensor.
    """
    tensor._reset(full(shape=tensor.shape, value=val, dtype=tensor.dtype))


def zeros_(tensor: Tensor) -> None:
    """Fills the given ``tensor`` with the scalar value `0`.

    Args:
        tensor: tensor to be initialized.
    """
    fill_(tensor, 0)


def ones_(tensor: Tensor) -> None:
    """Fills the given ``tensor`` with the scalar value `1`.

    Args:
        tensor: tensor to be initialized.
    """
    fill_(tensor, 1)
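

# Illustrative usage of the constant initializers above (an assumption about a
# typical call site, not part of the original module). ``Tensor`` is assumed to
# accept a numpy array, as it does in MegEngine:
#
#     w = Tensor(np.empty((2, 3), dtype="float32"))
#     fill_(w, 0.5)   # every entry of w becomes 0.5
#     zeros_(w)       # w is now all zeros
#     ones_(w)        # w is now all ones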


def uniform_(tensor: Tensor, a: float = 0.0, b: float = 1.0) -> None:
    r"""Fills the given ``tensor`` with random values sampled from the uniform
    distribution :math:`\mathcal{U}(\text{a}, \text{b})`.

    Args:
        tensor: tensor to be initialized.
        a: lower bound of the sampling interval.
        b: upper bound of the sampling interval.
    """
    tensor._reset(uniform(size=tensor.shape, low=a, high=b).astype(tensor.dtype))


def normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> None:
    r"""Fills the given ``tensor`` with random values sampled from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: tensor to be initialized.
        mean: mean of the normal distribution.
        std: standard deviation of the normal distribution.
    """
    tensor._reset(normal(size=tensor.shape, mean=mean, std=std).astype(tensor.dtype))
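

# Illustrative sketch (not part of the original module): drawing from the two
# random initializers and sanity-checking the sample statistics; with a tensor
# this large the observed values should be close to the requested parameters.
#
#     w = Tensor(np.empty((1000, 1000), dtype="float32"))
#     uniform_(w, a=-1.0, b=1.0)     # w.numpy().min() ~ -1, w.numpy().max() ~ 1
#     normal_(w, mean=0.0, std=0.1)  # w.numpy().std() ~ 0.1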


def calculate_gain(
    nonlinearity: str, param: Optional[Union[int, float]] = None
) -> float:
    r"""Returns a recommended gain value (see the table below) for the given
    nonlinearity function.

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky ReLU        :math:`\sqrt{\frac{2}{1 + {\text{negative}_\text{slope}}^2}}`
    ================= ====================================================

    Args:
        nonlinearity: name of the non-linear function.
        param: optional parameter for leaky_relu. Only effective when
            ``nonlinearity`` is "leaky_relu".
    """
    linear_fns = [
        "linear",
        "conv1d",
        "conv2d",
        "conv3d",
        "conv_transpose1d",
        "conv_transpose2d",
        "conv_transpose3d",
    ]
    if nonlinearity in linear_fns or nonlinearity == "sigmoid":
        return 1
    if nonlinearity == "tanh":
        return 5.0 / 3
    if nonlinearity == "relu":
        return math.sqrt(2.0)
    if nonlinearity == "leaky_relu":
        if param is None:
            negative_slope = 0.01
        elif (
            not isinstance(param, bool)
            and isinstance(param, int)
            or isinstance(param, float)
        ):
            # True/False are instances of int, hence the check above
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + negative_slope ** 2))
    raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
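

# Worked examples for ``calculate_gain`` (the values follow directly from the
# formulas in the table above; shown here as an illustration, not part of the
# original module):
#
#     calculate_gain("linear")            # 1
#     calculate_gain("tanh")              # 5/3  ~= 1.6667
#     calculate_gain("relu")              # sqrt(2) ~= 1.4142
#     calculate_gain("leaky_relu")        # sqrt(2 / (1 + 0.01**2)) ~= 1.4141
#     calculate_gain("leaky_relu", 0.2)   # sqrt(2 / (1 + 0.2**2))  ~= 1.3868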


def calculate_fan_in_and_fan_out(tensor: Tensor) -> Tuple[float, float]:
    r"""Calculates the fan_in / fan_out values for the given weight tensor. This
    function assumes the input tensor is stored in ``NCHW`` format.

    Note:
        The group conv2d kernel shape in MegEngine is ``(G, O/G, I/G, K, K)``. This
        function calculates ``fan_out = O/G * K * K`` by default, whereas PyTorch
        uses ``fan_out = O * K * K``.

    Args:
        tensor: weight tensor in ``NCHW`` format.
    """
    shape = tensor.shape
    ndim = len(shape)
    if ndim < 2:
        raise ValueError(
            "fan_in and fan_out can not be computed for tensor with fewer than 2 "
            "dimensions"
        )

    if ndim == 2:  # Linear
        fan_in = shape[1]
        fan_out = shape[0]
    else:
        if ndim >= 5:
            # ignore the groups dimension of group conv2d and group conv3d
            # FIXME: will be wrong for conv3d
            shape = shape[1:]
        num_input_fmaps = shape[1]
        num_output_fmaps = shape[0]
        receptive_field_size = 1
        if ndim > 2:
            receptive_field_size = reduce(lambda x, y: x * y, shape[2:], 1)
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
    return fan_in, fan_out
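

# Worked example for ``calculate_fan_in_and_fan_out`` (an illustration, not
# part of the original module): a conv2d weight of shape
# (O, I, Kh, Kw) = (64, 3, 3, 3) has a receptive field of 3 * 3 = 9, so
#
#     w = Tensor(np.empty((64, 3, 3, 3), dtype="float32"))
#     calculate_fan_in_and_fan_out(w)   # -> (27, 576): fan_in = 3 * 9, fan_out = 64 * 9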


def calculate_correct_fan(tensor: Tensor, mode: str) -> float:
    r"""Calculates the fan_in / fan_out value for the given weight tensor,
    depending on the given ``mode``.

    See :func:`calculate_fan_in_and_fan_out` for details.

    Args:
        tensor: weight tensor in ``NCHW`` format.
        mode: "fan_in" or "fan_out".
    """
    mode = mode.lower()
    valid_modes = ["fan_in", "fan_out"]
    if mode not in valid_modes:
        raise ValueError(
            "Mode {} not supported, please use one of {}".format(mode, valid_modes)
        )
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == "fan_in" else fan_out
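

# Illustrative check (not part of the original module): for a Linear weight of
# shape (out_features, in_features) = (8, 4),
#
#     w = Tensor(np.empty((8, 4), dtype="float32"))
#     calculate_correct_fan(w, "fan_in")    # -> 4 (= in_features)
#     calculate_correct_fan(w, "fan_out")   # -> 8 (= out_features)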


def xavier_uniform_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fills tensor with random values sampled from :math:`\mathcal{U}(-a, a)`
    where

    .. math::

        a = \text{gain} \times \sqrt{\frac{6}{\text{fan_in} + \text{fan_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    Args:
        tensor: tensor to be initialized.
        gain: scaling factor for :math:`a`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std
    uniform_(tensor, -a, a)


def xavier_normal_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan_in} + \text{fan_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    Args:
        tensor: tensor to be initialized.
        gain: scaling factor for :math:`\text{std}`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    normal_(tensor, 0.0, std)
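

# Worked example for the Glorot initializers (an illustration, not part of the
# original module): for the (64, 3, 3, 3) conv2d weight above, fan_in = 27 and
# fan_out = 576, so with the default gain = 1.0:
#
#     xavier_normal_(w)    # std = sqrt(2 / 603) ~= 0.0576
#     xavier_uniform_(w)   # a   = sqrt(6 / 603) ~= 0.0998, samples lie in (-a, a)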


def msra_uniform_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::

        \text{bound} = \sqrt{\frac{6}{(1 + a^2) \times \text{fan_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification` - He, K. et al. (2015).

    Args:
        tensor: tensor to be initialized.
        a: optional parameter for calculating gain for leaky_relu. See
            :func:`calculate_gain` for details.
        mode: "fan_in" or "fan_out". Selects the fan value used to scale
            :math:`\text{bound}`. See :func:`calculate_fan_in_and_fan_out` for
            details.
        nonlinearity: name of the non-linear function used to calculate :math:`gain`.
            See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    uniform_(tensor, -bound, bound)


def msra_normal_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification` - He, K. et al. (2015).

    Args:
        tensor: tensor to be initialized.
        a: optional parameter for calculating gain for leaky_relu. See
            :func:`calculate_gain` for details.
        mode: "fan_in" or "fan_out". Selects the fan value used to scale
            :math:`\text{std}`. See :func:`calculate_fan_in_and_fan_out` for
            details.
        nonlinearity: name of the non-linear function used to calculate :math:`gain`.
            See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    normal_(tensor, 0, std)
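

# Illustrative end-to-end usage (an assumption about a typical call site, not
# part of the original module): He/MSRA initialization of a conv2d weight that
# feeds a ReLU, plus a zeroed bias. With fan_in = 27 and gain = sqrt(2), the
# normal variant uses std = sqrt(2 / 27) ~= 0.272 and the uniform variant uses
# bound = sqrt(6 / 27) ~= 0.471.
#
#     weight = Tensor(np.empty((64, 3, 3, 3), dtype="float32"))
#     bias = Tensor(np.empty((64,), dtype="float32"))
#     msra_normal_(weight, mode="fan_in", nonlinearity="relu")
#     zeros_(bias)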