
init.py 9.3 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from functools import reduce
from typing import Optional, Tuple, Union

import numpy as np

from ..core import Tensor

def fill_(tensor: Tensor, val: Union[float, int]) -> None:
    """Fill the given ``tensor`` with value ``val``.

    :param tensor: An n-dimensional tensor to be initialized
    :param val: The value to be filled throughout the tensor
    """
    tensor.set_value(np.full(tensor.shape, val, tensor.dtype))

def zeros_(tensor: Tensor) -> None:
    """Fill the given ``tensor`` with scalar value `0`.

    :param tensor: An n-dimensional tensor to be initialized
    """
    fill_(tensor, 0)

def ones_(tensor: Tensor) -> None:
    """Fill the given ``tensor`` with the scalar value `1`.

    :param tensor: An n-dimensional tensor to be initialized
    """
    fill_(tensor, 1)

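# --- Usage sketch (editor's illustration, not part of the original file). ---
# Shows the constant fillers above on a small weight; building a Tensor from
# a NumPy array is an assumption and may differ across MegEngine versions.
def _demo_constant_fill():
    w = Tensor(np.zeros((2, 3), dtype=np.float32))  # hypothetical constructor
    fill_(w, 0.5)  # every element becomes 0.5
    ones_(w)       # every element becomes 1.0
    zeros_(w)      # every element becomes 0.0
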
def uniform_(tensor: Tensor, a: float = 0.0, b: float = 1.0) -> None:
    r"""Fill the given ``tensor`` with random values sampled from the uniform
    distribution :math:`\mathcal{U}(\text{a}, \text{b})`.

    :param tensor: An n-dimensional tensor to be initialized
    :param a: Lower bound of the sampling interval
    :param b: Upper bound of the sampling interval
    """
    tensor.set_value(np.random.uniform(a, b, tensor.shape).astype(tensor.dtype))

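# --- Usage sketch (editor's illustration, not part of the original file). ---
# Draws from U(-0.1, 0.1); the samples are cast to the tensor's dtype.
def _demo_uniform():
    w = Tensor(np.zeros((4, 4), dtype=np.float32))  # hypothetical constructor
    uniform_(w, a=-0.1, b=0.1)
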
def normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> None:
    r"""Fill the given ``tensor`` with random values sampled from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    :param tensor: An n-dimensional tensor to be initialized
    :param mean: The mean of the normal distribution
    :param std: The standard deviation of the normal distribution
    """
    # Cast to the tensor's own dtype, consistent with ``uniform_`` above.
    tensor.set_value(np.random.normal(mean, std, tensor.shape).astype(tensor.dtype))

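# --- Usage sketch (editor's illustration, not part of the original file). ---
# N(0, 0.02^2) is a common hand-picked initialization for embedding tables.
def _demo_normal():
    w = Tensor(np.zeros((1000, 64), dtype=np.float32))  # hypothetical constructor
    normal_(w, mean=0.0, std=0.02)
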
def calculate_gain(
    nonlinearity: str, param: Optional[Union[int, float]] = None
) -> float:
    r"""Return a recommended gain value (see the table below) for the given
    nonlinearity function.

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky ReLU        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
    ================= ====================================================

    :param nonlinearity: Name of the non-linear function
    :param param: Optional parameter for leaky_relu. Only effective when
        ``nonlinearity`` is "leaky_relu".
    """
    linear_fns = [
        "linear",
        "conv1d",
        "conv2d",
        "conv3d",
        "conv_transpose1d",
        "conv_transpose2d",
        "conv_transpose3d",
    ]
    if nonlinearity in linear_fns or nonlinearity == "sigmoid":
        return 1
    if nonlinearity == "tanh":
        return 5.0 / 3
    if nonlinearity == "relu":
        return math.sqrt(2.0)
    if nonlinearity == "leaky_relu":
        if param is None:
            negative_slope = 0.01
        elif (
            not isinstance(param, bool)
            and isinstance(param, int)
            or isinstance(param, float)
        ):
            # True/False are instances of int, hence the check above
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + negative_slope ** 2))
    raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))

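# --- Usage sketch (editor's illustration, not part of the original file). ---
# calculate_gain is pure arithmetic, so its outputs can be checked directly
# against the table above.
def _demo_gain():
    assert calculate_gain("linear") == 1
    assert calculate_gain("tanh") == 5.0 / 3
    assert calculate_gain("relu") == math.sqrt(2.0)
    # leaky_relu with negative_slope 0.1: sqrt(2 / (1 + 0.1 ** 2))
    assert calculate_gain("leaky_relu", 0.1) == math.sqrt(2.0 / (1 + 0.1 ** 2))
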
def calculate_fan_in_and_fan_out(tensor: Tensor) -> Tuple[float, float]:
    """
    Calculate fan_in / fan_out value for given weight tensor. This function assumes
    input tensor is stored in NCHW format.

    :param tensor: Weight tensor in NCHW format
    """
    shape = tensor.shape
    ndim = len(shape)
    if ndim < 2:
        raise ValueError(
            "fan_in and fan_out can not be computed for tensor with fewer than 2 "
            "dimensions"
        )
    if ndim == 2:  # Linear
        fan_in = shape[1]
        fan_out = shape[0]
    else:
        num_input_fmaps = shape[1]
        num_output_fmaps = shape[0]
        receptive_field_size = 1
        if ndim > 2:
            receptive_field_size = reduce(lambda x, y: x * y, shape[2:], 1)
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
    return fan_in, fan_out

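# --- Usage sketch (editor's illustration, not part of the original file). ---
# For a conv weight of shape (16, 8, 3, 3) in NCHW layout the receptive field
# is 3 * 3 = 9, so fan_in = 8 * 9 = 72 and fan_out = 16 * 9 = 144.
def _demo_fan():
    w = Tensor(np.zeros((16, 8, 3, 3), dtype=np.float32))  # hypothetical constructor
    assert calculate_fan_in_and_fan_out(w) == (72, 144)
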
def calculate_correct_fan(tensor: Tensor, mode: str) -> float:
    """
    Calculate fan_in or fan_out value for given weight tensor, depending on given
    ``mode``.

    See :func:`calculate_fan_in_and_fan_out` for details.

    :param tensor: Weight tensor in NCHW format
    :param mode: ``'fan_in'`` or ``'fan_out'``
    """
    mode = mode.lower()
    valid_modes = ["fan_in", "fan_out"]
    if mode not in valid_modes:
        raise ValueError(
            "Mode {} not supported, please use one of {}".format(mode, valid_modes)
        )
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == "fan_in" else fan_out

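# --- Usage sketch (editor's illustration, not part of the original file). ---
# ``mode`` simply picks one of the two fan values; it is lower-cased first,
# so "FAN_OUT" is accepted as well.
def _demo_correct_fan():
    w = Tensor(np.zeros((16, 8, 3, 3), dtype=np.float32))  # hypothetical constructor
    assert calculate_correct_fan(w, "fan_in") == 72
    assert calculate_correct_fan(w, "FAN_OUT") == 144
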
def xavier_uniform_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fill ``tensor`` with random values sampled from :math:`\mathcal{U}(-a, a)`
    where

    .. math::

        a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    :param tensor: An n-dimensional tensor to be initialized
    :param gain: Scaling factor for :math:`a`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std
    uniform_(tensor, -a, a)

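# --- Usage sketch (editor's illustration, not part of the original file). ---
# For a (100, 50) Linear weight with gain=1: fan_in + fan_out = 150, so
# a = sqrt(6 / 150) = 0.2 and every sample lies in (-0.2, 0.2).
def _demo_xavier_uniform():
    w = Tensor(np.zeros((100, 50), dtype=np.float32))  # hypothetical constructor
    xavier_uniform_(w)
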
def xavier_normal_(tensor: Tensor, gain: float = 1.0) -> None:
    r"""Fill ``tensor`` with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization. Detailed information can be retrieved from
    `Understanding the difficulty of training deep feedforward neural networks` -
    Glorot, X. & Bengio, Y. (2010).

    :param tensor: An n-dimensional tensor to be initialized
    :param gain: Scaling factor for :math:`\text{std}`.
    """
    fan_in, fan_out = calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    normal_(tensor, 0.0, std)

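# --- Usage sketch (editor's illustration, not part of the original file). ---
# Same (100, 50) weight with gain=1: std = sqrt(2 / 150), roughly 0.1155.
def _demo_xavier_normal():
    w = Tensor(np.zeros((100, 50), dtype=np.float32))  # hypothetical constructor
    xavier_normal_(w)
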
def msra_uniform_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fill ``tensor`` with random values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::

        \text{bound} = \sqrt{\frac{6}{(1 + a^2) \times \text{fan\_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification`

    :param tensor: An n-dimensional tensor to be initialized
    :param a: Optional parameter for calculating gain for leaky_relu. See
        :func:`calculate_gain` for details.
    :param mode: ``'fan_in'`` or ``'fan_out'``, used to select the fan value that
        scales :math:`\text{bound}`. See :func:`calculate_correct_fan` for details.
    :param nonlinearity: Name of the non-linear function used to calculate
        :math:`\text{gain}`. See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    uniform_(tensor, -bound, bound)

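# --- Usage sketch (editor's illustration, not part of the original file). ---
# Defaults (a=0, fan_in, leaky_relu) give gain = sqrt(2); for the conv weight
# above, fan_in = 72, so bound = sqrt(3) * sqrt(2) / sqrt(72) = sqrt(6 / 72),
# roughly 0.289.
def _demo_msra_uniform():
    w = Tensor(np.zeros((16, 8, 3, 3), dtype=np.float32))  # hypothetical constructor
    msra_uniform_(w, mode="fan_in", nonlinearity="leaky_relu")
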
def msra_normal_(
    tensor: Tensor, a: float = 0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"
) -> None:
    r"""Fill ``tensor`` with random values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::

        \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan\_in}}}

    Detailed information can be retrieved from
    `Delving deep into rectifiers: Surpassing human-level performance on ImageNet
    classification`

    :param tensor: An n-dimensional tensor to be initialized
    :param a: Optional parameter for calculating gain for leaky_relu. See
        :func:`calculate_gain` for details.
    :param mode: ``'fan_in'`` or ``'fan_out'``, used to select the fan value that
        scales :math:`\text{std}`. See :func:`calculate_correct_fan` for details.
    :param nonlinearity: Name of the non-linear function used to calculate
        :math:`\text{gain}`. See :func:`calculate_gain` for details.
    """
    fan = calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    normal_(tensor, 0, std)

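# --- Usage sketch (editor's illustration, not part of the original file). ---
# Matching ReLU networks: gain = sqrt(2), fan_in = 72, std = sqrt(2 / 72) = 1 / 6.
def _demo_msra_normal():
    w = Tensor(np.zeros((16, 8, 3, 3), dtype=np.float32))  # hypothetical constructor
    msra_normal_(w, mode="fan_in", nonlinearity="relu")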

The MegEngine installation package already bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose between. To run GPU programs, make sure the machine has a GPU device and its driver installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
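For reference, a typical installation command looks like the following (the wheel index URL is the one documented on the MegEngine site at the time and may have changed since; check the official docs for the current command):

    python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html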