
init.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from functools import reduce
from typing import Optional, Tuple, Union

import numpy as np

from ..functional import full
from ..random import normal, uniform
from ..tensor import Tensor


def fill_(tensor: Tensor, val: Union[float, int]) -> None:
    """Fills the given ``tensor`` with value ``val``.

    Args:
        tensor: tensor to be initialized.
        val: value to be filled throughout the tensor.
    """
    tensor._reset(full(shape=tensor.shape, value=val, dtype=tensor.dtype))


def zeros_(tensor: Tensor) -> None:
    """Fills the given ``tensor`` with scalar value `0`.

    Args:
        tensor: tensor to be initialized.
    """
    fill_(tensor, 0)


def ones_(tensor: Tensor) -> None:
    """Fills the given ``tensor`` with the scalar value `1`.

    Args:
        tensor: tensor to be initialized.
    """
    fill_(tensor, 1)
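

# A minimal usage sketch (hypothetical helper, not part of the module's API):
# how the three basic fillers above behave. ``Tensor`` and ``np`` are the
# imports already at the top of this file.
def _example_basic_fills():
    w = Tensor(np.empty((2, 3), dtype="float32"))
    fill_(w, 0.5)  # every element becomes 0.5
    zeros_(w)      # equivalent to fill_(w, 0)
    ones_(w)       # equivalent to fill_(w, 1)
    return w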


def uniform_(tensor: Tensor, a: float = 0.0, b: float = 1.0) -> None:
    r"""Fills the given ``tensor`` with random values sampled from the uniform
    distribution :math:`\mathcal{U}(\text{a}, \text{b})`.

    Args:
        tensor: tensor to be initialized.
        a: lower bound of the sampling interval.
        b: upper bound of the sampling interval.
    """
    tensor._reset(uniform(size=tensor.shape, low=a, high=b).astype(tensor.dtype))


def normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> None:
    r"""Fills the given ``tensor`` with random values sampled from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: tensor to be initialized.
        mean: mean of the normal distribution.
        std: standard deviation of the normal distribution.
    """
    tensor._reset(normal(size=tensor.shape, mean=mean, std=std).astype(tensor.dtype))
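

# A minimal sketch (hypothetical helper): the random fillers draw i.i.d.
# samples, so on a large tensor the empirical statistics should be close to
# the requested ones.
def _example_random_fills():
    w = Tensor(np.empty((1024, 1024), dtype="float32"))
    uniform_(w, a=-1.0, b=1.0)      # samples lie in (-1, 1), mean ~ 0
    normal_(w, mean=0.0, std=0.02)  # empirical std should be ~ 0.02
    return float(w.numpy().std())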


def calculate_gain(
    nonlinearity: str, param: Optional[Union[int, float]] = None
) -> float:
    r"""Returns a recommended gain value (see the table below) for the given
    nonlinearity function.

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky ReLU        :math:`\sqrt{\frac{2}{1 + {\text{negative}_\text{slope}}^2}}`
    ================= ====================================================

    Args:
        nonlinearity: name of the non-linear function.
        param: optional parameter for leaky_relu. Only effective when
            ``nonlinearity`` is "leaky_relu".
    """
    linear_fns = [
        "linear",
        "conv1d",
        "conv2d",
        "conv3d",
        "conv_transpose1d",
        "conv_transpose2d",
        "conv_transpose3d",
    ]
    if nonlinearity in linear_fns or nonlinearity == "sigmoid":
        return 1
    if nonlinearity == "tanh":
        return 5.0 / 3
    if nonlinearity == "relu":
        return math.sqrt(2.0)
    if nonlinearity == "leaky_relu":
        if param is None:
            negative_slope = 0.01
        elif (
            not isinstance(param, bool)
            and isinstance(param, int)
            or isinstance(param, float)
        ):
            # True/False are instances of int, hence the bool check above
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + negative_slope ** 2))
    raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
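

# A small sanity check (hypothetical helper) reproducing the gain table above
# numerically, including the default negative_slope of 0.01 for leaky_relu.
def _example_gains():
    assert calculate_gain("linear") == 1
    assert calculate_gain("tanh") == 5.0 / 3
    assert calculate_gain("relu") == math.sqrt(2.0)
    assert math.isclose(
        calculate_gain("leaky_relu"), math.sqrt(2.0 / (1 + 0.01 ** 2))
    )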


def calculate_fan_in_and_fan_out(tensor: Tensor) -> Tuple[float, float]:
    r"""Calculates the fan_in / fan_out values for the given weight tensor.
    This function assumes the input tensor is stored in ``NCHW`` format.

    Args:
        tensor: weight tensor in ``NCHW`` format.
    """
    shape = tensor.shape
    ndim = len(shape)
    if ndim < 2:
        raise ValueError(
            "fan_in and fan_out can not be computed for tensor with fewer than 2 "
            "dimensions"
        )

    if ndim == 2:  # Linear
        fan_in = shape[1]
        fan_out = shape[0]
    else:
        num_input_fmaps = shape[1]
        num_output_fmaps = shape[0]
        receptive_field_size = 1
        if ndim > 2:
            receptive_field_size = reduce(lambda x, y: x * y, shape[2:], 1)
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
    return fan_in, fan_out
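

# A minimal sketch (hypothetical helper): for a 2D linear weight of shape
# (out, in) the fans are just the two dimensions; for a 4D NCHW convolution
# weight of shape (out, in, kh, kw), fan_in = in * kh * kw and
# fan_out = out * kh * kw.
def _example_fans():
    linear_w = Tensor(np.empty((10, 20), dtype="float32"))
    assert calculate_fan_in_and_fan_out(linear_w) == (20, 10)
    conv_w = Tensor(np.empty((64, 3, 3, 3), dtype="float32"))
    assert calculate_fan_in_and_fan_out(conv_w) == (3 * 9, 64 * 9)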


def calculate_correct_fan(tensor: Tensor, mode: str) -> float:
    r"""Calculates the fan_in or fan_out value for the given weight tensor,
    depending on the given ``mode``.

    See :func:`calculate_fan_in_and_fan_out` for details.

    Args:
        tensor: weight tensor in ``NCHW`` format.
        mode: "fan_in" or "fan_out".
    """
    mode = mode.lower()
    valid_modes = ["fan_in", "fan_out"]
    if mode not in valid_modes:
        raise ValueError(
            "Mode {} not supported, please use one of {}".format(mode, valid_modes)
        )

    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == "fan_in" else fan_out
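

# A minimal sketch (hypothetical helper): calculate_correct_fan simply picks
# one of the two fan values according to ``mode``.
def _example_correct_fan():
    w = Tensor(np.empty((64, 3, 3, 3), dtype="float32"))
    assert calculate_correct_fan(w, "fan_in") == 3 * 9
    assert calculate_correct_fan(w, "fan_out") == 64 * 9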


def xavier_uniform_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fills tensor with random values sampled from :math:`\mathcal{U}(-a, a)`
    where

    .. math::

        a = \text{gain} \times \sqrt{\frac{6}{\text{fan_in} + \text{fan_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    Args:
        tensor: tensor to be initialized.
        gain: scaling factor for :math:`a`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std
    uniform_(tensor, -a, a)


def xavier_normal_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan_in} + \text{fan_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    Args:
        tensor: tensor to be initialized.
        gain: scaling factor for :math:`\text{std}`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    normal_(tensor, 0.0, std)
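

# A minimal sketch (hypothetical helper): Xavier initialization of a linear
# weight. With fan_in = 20 and fan_out = 10, xavier_uniform_ samples from
# U(-a, a) with a = gain * sqrt(6 / 30) ~ 0.447 (gain = 1), and
# xavier_normal_ uses std = gain * sqrt(2 / 30) ~ 0.430 for the tanh gain 5/3.
def _example_xavier():
    w = Tensor(np.empty((10, 20), dtype="float32"))
    xavier_uniform_(w)
    xavier_normal_(w, gain=calculate_gain("tanh"))
    return w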


def msra_uniform_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::

        \text{bound} = \sqrt{\frac{6}{(1 + a^2) \times \text{fan_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification` - He, K. et al. (2015).

    Args:
        tensor: tensor to be initialized.
        a: optional parameter for calculating gain for leaky_relu. See
            :func:`calculate_gain` for details.
        mode: "fan_in" or "fan_out", used to choose the fan value that
            scales :math:`\text{bound}`. See :func:`calculate_fan_in_and_fan_out`
            for details.
        nonlinearity: name of the non-linear function used to calculate
            :math:`\text{gain}`. See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    uniform_(tensor, -bound, bound)


def msra_normal_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fills tensor with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification` - He, K. et al. (2015).

    Args:
        tensor: tensor to be initialized.
        a: optional parameter for calculating gain for leaky_relu. See
            :func:`calculate_gain` for details.
        mode: "fan_in" or "fan_out", used to choose the fan value that
            scales :math:`\text{std}`. See :func:`calculate_fan_in_and_fan_out`
            for details.
        nonlinearity: name of the non-linear function used to calculate
            :math:`\text{gain}`. See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    normal_(tensor, 0, std)
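

# A minimal sketch (hypothetical helper): MSRA (a.k.a. Kaiming / He)
# initialization for a conv weight feeding a ReLU. mode="fan_in" preserves the
# variance of activations in the forward pass; mode="fan_out" preserves the
# variance of gradients in the backward pass.
def _example_msra():
    w = Tensor(np.empty((64, 3, 3, 3), dtype="float32"))
    # fan_in = 3 * 3 * 3 = 27, std = sqrt(2 / 27) ~ 0.272, bound ~ 0.471
    msra_uniform_(w, mode="fan_in", nonlinearity="relu")
    # fan_out = 64 * 3 * 3 = 576, std = sqrt(2 / 576) ~ 0.059
    msra_normal_(w, mode="fan_out", nonlinearity="relu")
    return w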