
batchnorm.py 9.5 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Optional

import numpy as np

from ..distributed.group import WORLD, Group
from ..functional import batch_norm2d, sync_batch_norm
from ..tensor import Parameter, Tensor
from . import init
from .module import Module
class _BatchNorm(Module):
    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.9,
        affine=True,
        track_running_stats=True,
        freeze=False,
    ):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self._track_running_stats_saved = track_running_stats
        self.freeze = freeze
        if self.affine:
            self.weight = Parameter(np.ones(num_features, dtype=np.float32))
            self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
        else:
            self.weight = None
            self.bias = None

        tshape = (1, self.num_features, 1, 1)

        if self.track_running_stats:
            self.running_mean = Tensor(np.zeros(tshape, dtype=np.float32))
            self.running_var = Tensor(np.ones(tshape, dtype=np.float32))
        else:
            self.running_mean = None
            self.running_var = None

    def reset_running_stats(self) -> None:
        if self.track_running_stats:
            init.zeros_(self.running_mean)
            init.ones_(self.running_var)

    def reset_parameters(self) -> None:
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def _check_input_ndim(self, inp):
        raise NotImplementedError

    def forward(self, inp):
        self._check_input_ndim(inp)
        if not self._track_running_stats_saved:
            assert (
                not self.track_running_stats
            ), "track_running_stats cannot be initialized to False and then changed to True"

        # batch_norm2d expects a 4D input, so 2D/3D inputs are padded with
        # trailing unit dimensions and reshaped back afterwards.
        _ndims = len(inp.shape)
        if _ndims != 4:
            origin_shape = inp.shape
            if _ndims == 2:
                n, c = inp.shape[0], inp.shape[1]
                new_shape = (n, c, 1, 1)
            elif _ndims == 3:
                n, c, h = inp.shape[0], inp.shape[1], inp.shape[2]
                new_shape = (n, c, h, 1)

            inp = inp.reshape(new_shape)

        if self.freeze and self.training and self._track_running_stats_saved:
            # Fold the frozen running statistics into a per-channel affine
            # transform, so the input is normalized without updating any stats.
            scale = self.weight.reshape(1, -1, 1, 1) * (
                self.running_var + self.eps
            ) ** (-0.5)
            bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
            return inp * scale.detach() + bias.detach()

        if self.training and self.track_running_stats:
            exponential_average_factor = self.momentum
        else:
            exponential_average_factor = 0.0  # useless

        output = batch_norm2d(
            inp,
            self.running_mean if self.track_running_stats else None,
            self.running_var if self.track_running_stats else None,
            self.weight,
            self.bias,
            training=self.training
            or ((self.running_mean is None) and (self.running_var is None)),
            momentum=exponential_average_factor,
            eps=self.eps,
        )

        if _ndims != 4:
            output = output.reshape(origin_shape)

        return output
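
# A minimal sketch (numpy only; all names here are illustrative, not part of
# this module) of the algebra behind the ``freeze`` branch above: the frozen
# running statistics collapse into a single per-channel affine transform.
#
#     import numpy as np
#     x = np.random.rand(2, 3, 4, 4).astype("float32")
#     gamma = np.random.rand(1, 3, 1, 1).astype("float32")
#     beta = np.random.rand(1, 3, 1, 1).astype("float32")
#     mean = np.random.rand(1, 3, 1, 1).astype("float32")
#     var = np.random.rand(1, 3, 1, 1).astype("float32")
#     eps = 1e-5
#     scale = gamma * (var + eps) ** -0.5
#     bias = beta - mean * scale
#     assert np.allclose(
#         x * scale + bias, (x - mean) / np.sqrt(var + eps) * gamma + beta, atol=1e-6
#     )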
class SyncBatchNorm(_BatchNorm):
    r"""
    Applies Synchronized Batch Normalization.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.9,
        affine=True,
        track_running_stats=True,
        freeze=False,
        group: Optional[Group] = WORLD,
    ) -> None:
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, freeze
        )
        self.group = group

    def _check_input_ndim(self, inp):
        if len(inp.shape) not in {2, 3, 4}:
            raise ValueError(
                "expected 2D, 3D or 4D input (got {}D input)".format(len(inp.shape))
            )

    def forward(self, inp):
        self._check_input_ndim(inp)

        # Pad 2D/3D inputs to 4D; the target shape is built as a Tensor so its
        # leading entries can be filled in from the original shape.
        _ndims = len(inp.shape)
        if _ndims != 4:
            new_shape = Tensor([1, 1, 1, 1], device=inp.device)
            origin_shape = inp.shape
            if _ndims == 2:
                new_shape[:2] = origin_shape[:2]
            elif _ndims == 3:
                new_shape[:3] = origin_shape[:3]
            else:
                raise ValueError(
                    "expected 2D, 3D or 4D input (got {}D input)".format(len(inp.shape))
                )
            inp = inp.reshape(new_shape)

        if self.training and self.track_running_stats:
            exponential_average_factor = self.momentum
        else:
            exponential_average_factor = 0.0  # useless

        output = sync_batch_norm(
            inp,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            exponential_average_factor,
            self.eps,
            group=self.group,
        )

        if _ndims != 4:
            output = output.reshape(origin_shape)

        return output
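
# Unlike plain batch norm, ``sync_batch_norm`` computes the batch statistics
# jointly over every worker in ``group``. A numpy sketch (illustrative names,
# not this module's API) of why that works: per-worker first and second
# moments combine exactly into the global mean and variance when the
# per-worker batch sizes are equal.
#
#     import numpy as np
#     a, b = np.random.rand(8), np.random.rand(8)   # two workers' batches
#     mean = (a.mean() + b.mean()) / 2
#     var = (np.mean(a ** 2) + np.mean(b ** 2)) / 2 - mean ** 2
#     both = np.concatenate([a, b])
#     assert np.allclose([mean, var], [both.mean(), both.var()])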
class BatchNorm1d(_BatchNorm):
    r"""
    Applies Batch Normalization over a 2D/3D tensor.

    Refer to :class:`~.BatchNorm2d` for more information.
    """

    def _check_input_ndim(self, inp):
        if len(inp.shape) not in {2, 3}:
            raise ValueError(
                "expected 2D or 3D input (got {}D input)".format(len(inp.shape))
            )
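
# A hedged usage sketch: BatchNorm1d accepts (N, C) or (N, C, L) inputs, which
# the base class reshapes to 4D before calling ``batch_norm2d``.
#
#     import numpy as np
#     import megengine as mge
#     import megengine.module as M
#     m = M.BatchNorm1d(4)
#     out2d = m(mge.tensor(np.random.rand(2, 4).astype("float32")))     # (N, C)
#     out3d = m(mge.tensor(np.random.rand(2, 4, 8).astype("float32")))  # (N, C, L)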
class BatchNorm2d(_BatchNorm):
    r"""
    Applies Batch Normalization over a 4D tensor.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard deviation are calculated per-dimension over
    the mini-batches and :math:`\gamma` and :math:`\beta` are learnable
    parameter vectors.

    By default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.9.

    If :attr:`track_running_stats` is set to ``False``, this layer will not
    keep running estimates, and batch statistics are instead used during
    evaluation time.

    .. note::
        This :attr:`momentum` argument is different from the one used in
        optimizer classes and the conventional notion of momentum.
        Mathematically, the update rule for the running statistics here is
        :math:`\hat{x}_\text{new} = \text{momentum} \times \hat{x} + (1 - \text{momentum}) \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    Because the Batch Normalization is done over the `C` dimension, computing
    statistics on `(N, H, W)` slices, it's common terminology to call this
    Spatial Batch Normalization.

    :type num_features: int
    :param num_features: usually :math:`C` from an input of size
        :math:`(N, C, H, W)`, or the highest ranked dimension of an input with
        fewer than 4 dimensions.
    :type eps: float
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5
    :type momentum: float
    :param momentum: the value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :type affine: bool
    :param affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters. Default: ``True``
    :type track_running_stats: bool
    :param track_running_stats: when set to ``True``, this module tracks the
        running mean and variance. When set to ``False``, this module does not
        track such statistics and always uses batch statistics in both training
        and eval modes. Default: ``True``
    :type freeze: bool
    :param freeze: when set to ``True``, this module does not update the
        running mean and variance, and uses the running mean and variance
        instead of the batch mean and batch variance to normalize the input.
        The parameter takes effect only when the module is initialized with
        ``track_running_stats`` as ``True`` and the module is in training mode.
        Default: ``False``

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        # With learnable parameters
        m = M.BatchNorm2d(4)
        inp = mge.tensor(np.random.rand(1, 4, 3, 3).astype("float32"))
        oup = m(inp)
        print(m.weight.numpy(), m.bias.numpy())

        # Without learnable parameters
        m = M.BatchNorm2d(4, affine=False)
        oup = m(inp)
        print(m.weight, m.bias)

    Outputs:

    .. testoutput::

        [1. 1. 1. 1.] [0. 0. 0. 0.]
        None None
    """

    def _check_input_ndim(self, inp):
        if len(inp.shape) != 4:
            raise ValueError("expected 4D input (got {}D input)".format(len(inp.shape)))
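
# A small numeric check (plain Python; illustrative) of the running-stats
# update rule documented above,
# x_hat_new = momentum * x_hat + (1 - momentum) * x_t:
#
#     momentum, running_mean, batch_mean = 0.9, 0.0, 1.0
#     running_mean = momentum * running_mean + (1 - momentum) * batch_mean
#     assert abs(running_mean - 0.1) < 1e-12  # one update step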

The MegEngine installation package ships with the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has GPU hardware and the driver installed. If you would like to try deep learning development on a cloud GPU platform, visit the MegStudio platform.