
batchnorm.py 12 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Optional

import numpy as np

from ..distributed.group import WORLD, Group
from ..functional.nn import batch_norm, sync_batch_norm
from ..tensor import Parameter, Tensor
from . import init
from .module import Module


class _BatchNorm(Module):
    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.9,
        affine=True,
        track_running_stats=True,
        freeze=False,
        compute_mode="default",
        param_dim="dim_1c11",
        **kwargs
    ):
        super(_BatchNorm, self).__init__(**kwargs)
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self._track_running_stats_saved = track_running_stats
        self.freeze = freeze
        self.compute_mode = compute_mode
        self.param_dim = param_dim
        if self.freeze:
            assert (
                self._track_running_stats_saved
            ), "track_running_stats must be initialized to True if freeze is True"
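        # Parameters and running statistics are stored with shape
        # (1, C, 1, 1), matching the default ``param_dim="dim_1c11"`` layout.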
        tshape = (1, self.num_features, 1, 1)
        if self.affine:
            self.weight = Parameter(np.ones(tshape, dtype=np.float32))
            self.bias = Parameter(np.zeros(tshape, dtype=np.float32))
        else:
            self.weight = None
            self.bias = None
        if self.track_running_stats:
            self.running_mean = Tensor(np.zeros(tshape, dtype=np.float32))
            self.running_var = Tensor(np.ones(tshape, dtype=np.float32))
        else:
            self.running_mean = None
            self.running_var = None

    def reset_running_stats(self) -> None:
        if self.track_running_stats:
            init.zeros_(self.running_mean)
            init.ones_(self.running_var)

    def reset_parameters(self) -> None:
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def _check_input_ndim(self, inp):
        raise NotImplementedError

    def forward(self, inp):
        self._check_input_ndim(inp)
        if not self._track_running_stats_saved:
            assert (
                not self.track_running_stats
            ), "track_running_stats can not be initialized to False and changed to True later"
        _weight = self.weight
        _bias = self.bias
        if self.freeze:
            if _weight is not None:
                _weight = _weight.detach()
            if _bias is not None:
                _bias = _bias.detach()
            # fast-path execution for freeze
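            # With frozen statistics, batch norm is a fixed affine transform:
            #   y = (x - running_mean) / sqrt(running_var + eps) * weight + bias
            # which folds into a single multiply-add, y = x * scale + new_bias, with
            #   scale    = weight / sqrt(running_var + eps)
            #   new_bias = bias - running_mean * scale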
            scale = (self.running_var + self.eps) ** (-0.5)
            if _weight is not None:
                scale *= _weight
            bias = -self.running_mean * scale
            if _bias is not None:
                bias += _bias
            return inp * scale + bias

        if self.training and self.track_running_stats:
            exponential_average_factor = self.momentum
        else:
            exponential_average_factor = 0.0  # useless
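        # Note the convention (see the BatchNorm2d docstring): running statistics
        # are updated as running = momentum * running + (1 - momentum) * batch,
        # so momentum=0.9 here corresponds to momentum=0.1 in PyTorch.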
        output = batch_norm(
            inp,
            self.running_mean if self.track_running_stats else None,
            self.running_var if self.track_running_stats else None,
            _weight,
            _bias,
            training=self.training
            or ((self.running_mean is None) and (self.running_var is None)),
            momentum=exponential_average_factor,
            eps=self.eps,
            compute_mode=self.compute_mode,
            param_dim=self.param_dim,
        )
        return output

    def _module_info_string(self) -> str:
        s = (
            "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
            "track_running_stats={track_running_stats}"
        )
        return s.format(**self.__dict__)


class SyncBatchNorm(_BatchNorm):
    r"""Applies Synchronized Batch Normalization for distributed training.

    Args:
        num_features: usually :math:`C` from an input of shape
            :math:`(N, C, H, W)` or the highest ranked dimension of an input
            less than 4D.
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the ``running_mean`` and ``running_var`` computation.
            Default: 0.9
        affine: a boolean value that when set to True, this module has
            learnable affine parameters. Default: True
        track_running_stats: when set to True, this module tracks the
            running mean and variance. When set to False, this module does not
            track such statistics and always uses batch statistics in both training
            and eval modes. Default: True
        freeze: when set to True, this module does not update the
            running mean and variance, and uses the running mean and variance instead of
            the batch mean and batch variance to normalize the input. The parameter takes effect
            only when the module is initialized with track_running_stats as True.
            Default: False
        group: communication group over which the mean and variance are calculated.
            Default: :obj:`~.distributed.WORLD`
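
    A minimal usage sketch (assumed, not from the original docs): construct the
    module inside a worker spawned by :func:`~.distributed.launcher`, so that
    statistics are synchronized across the ``WORLD`` group; the surrounding
    distributed setup is omitted:

    .. code-block::

        import megengine.module as M

        m = M.SyncBatchNorm(4)  # runs on each rank; mean/var are synchronized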
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.9,
        affine=True,
        track_running_stats=True,
        freeze=False,
        group: Optional[Group] = WORLD,
        **kwargs
    ) -> None:
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, freeze, **kwargs
        )
        self.group = group

    def _check_input_ndim(self, inp):
        if len(inp.shape) not in {2, 3, 4}:
            raise ValueError(
                "expected 2D, 3D or 4D input (got {}D input)".format(len(inp.shape))
            )

    def forward(self, inp):
        self._check_input_ndim(inp)
        inp_shape = inp.shape
        _ndims = len(inp_shape)
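        # sync_batch_norm operates on a 4D NCHW input; pad 2D/3D inputs with
        # trailing singleton dimensions, then restore the original shape below.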
        if _ndims != 4:
            new_shape = Tensor([1, 1, 1, 1], device=inp.device)
            origin_shape = inp_shape
            if _ndims == 2:
                new_shape[:2] = origin_shape[:2]
            elif _ndims == 3:
                new_shape[:3] = origin_shape[:3]
            else:
                raise ValueError(
                    "expected 2D, 3D or 4D input (got {}D input)".format(len(inp_shape))
                )
            inp = inp.reshape(new_shape)

        if self.training and self.track_running_stats:
            exponential_average_factor = self.momentum
        else:
            exponential_average_factor = 0.0  # useless

        _weight = self.weight
        _bias = self.bias
        if self.freeze:
            if _weight is not None:
                _weight = _weight.detach()
            if _bias is not None:
                _bias = _bias.detach()

        output = sync_batch_norm(
            inp,
            self.running_mean,
            self.running_var,
            _weight,
            _bias,
            training=(self.training and not self.freeze)
            or ((self.running_mean is None) and (self.running_var is None)),
            momentum=exponential_average_factor,
            eps=self.eps,
            group=self.group,
        )
        if _ndims != 4:
            output = output.reshape(origin_shape)
        return output


class BatchNorm1d(_BatchNorm):
    r"""Applies Batch Normalization over a 2D/3D tensor.

    Refer to :class:`~.BatchNorm2d` for more information.
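
    A minimal sketch (assumed here, mirroring the :class:`~.BatchNorm2d`
    example) with a 3D input of shape ``(N, C, L)``:

    .. code-block::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.BatchNorm1d(4)
        inp = mge.tensor(np.random.rand(2, 4, 8).astype("float32"))
        oup = m(inp)  # normalized over the C dimension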
    """

    def _check_input_ndim(self, inp):
        if len(inp.shape) not in {2, 3}:
            raise ValueError(
                "expected 2D or 3D input (got {}D input)".format(len(inp.shape))
            )


class BatchNorm2d(_BatchNorm):
    r"""Applies Batch Normalization over a 4D tensor.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and :math:`\gamma` and :math:`\beta` are learnable
    parameter vectors.

    By default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.9.

    If :attr:`track_running_stats` is set to ``False``, this layer will not
    keep running estimates, and batch statistics are used during
    evaluation instead.

    Because the Batch Normalization is done over the `C` dimension, computing
    statistics on `(N, H, W)` slices, it's common terminology to call this
    Spatial Batch Normalization.

    .. note::

        The update formula for ``running_mean`` and ``running_var`` (taking ``running_mean`` as an example) is

        .. math::

            \textrm{running\_mean} = \textrm{momentum} \times \textrm{running\_mean} + (1 - \textrm{momentum}) \times \textrm{batch\_mean}

        which could be defined differently in other frameworks. Most notably, ``momentum`` of 0.1 in PyTorch
        is equivalent to ``momentum`` of 0.9 here.

    Args:
        num_features: usually :math:`C` from an input of shape
            :math:`(N, C, H, W)` or the highest ranked dimension of an input
            less than 4D.
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the ``running_mean`` and ``running_var`` computation.
            Default: 0.9
        affine: a boolean value that when set to True, this module has
            learnable affine parameters. Default: True
        track_running_stats: when set to True, this module tracks the
            running mean and variance. When set to False, this module does not
            track such statistics and always uses batch statistics in both training
            and eval modes. Default: True
        freeze: when set to True, this module does not update the
            running mean and variance, and uses the running mean and variance instead of
            the batch mean and batch variance to normalize the input. The parameter takes effect
            only when the module is initialized with track_running_stats as True.
            Default: False

    Examples:

        .. testcode::

            import numpy as np
            import megengine as mge
            import megengine.module as M

            # With learnable parameters
            m = M.BatchNorm2d(4)
            inp = mge.tensor(np.random.rand(1, 4, 3, 3).astype("float32"))
            oup = m(inp)
            print(m.weight.numpy().flatten(), m.bias.numpy().flatten())

            # Without learnable parameters
            m = M.BatchNorm2d(4, affine=False)
            oup = m(inp)
            print(m.weight, m.bias)

        Outputs:

        .. testoutput::

            [1. 1. 1. 1.] [0. 0. 0. 0.]
            None None
    """

    def _check_input_ndim(self, inp):
        if len(inp.shape) != 4:
            raise ValueError("expected 4D input (got {}D input)".format(len(inp.shape)))