You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

adaptive_pooling.py 2.8 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283
  1. # -*- coding: utf-8 -*-
  2. from abc import abstractmethod
  3. from typing import Tuple, Union
  4. from ..functional import adaptive_avg_pool2d, adaptive_max_pool2d
  5. from ..tensor import Parameter, Tensor
  6. from .module import Module
  7. class _AdaptivePoolNd(Module):
  8. def __init__(self, oshp: Union[Tuple[int, int], int, Tensor], **kwargs):
  9. super(_AdaptivePoolNd, self).__init__(**kwargs)
  10. self.oshp = oshp
  11. @abstractmethod
  12. def forward(self, inp):
  13. pass
  14. class AdaptiveMaxPool2d(_AdaptivePoolNd):
  15. r"""Applies a 2D max adaptive pooling over an input.
  16. For instance, given an input of the size :math:`(N, C, H, W)` and
  17. an output shape :math:`(OH, OW)`, this layer generates the output of
  18. the size :math:`(N, C, OH, OW)` through a process described as:
  19. .. math::
  20. \begin{aligned}
  21. out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1}
  22. \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
  23. \text{stride[1]} \times w + n)
  24. \end{aligned}
  25. ``kernel_size`` and ``stride`` can be inferred from input shape and out shape:
  26. * padding: (0, 0)
  27. * stride: (floor(IH / OH), floor(IW / OW))
  28. * kernel_size: (IH - (OH - 1) * stride_h, IW - (OW - 1) * stride_w)
  29. Examples:
  30. >>> import numpy as np
  31. >>> m = M.AdaptiveMaxPool2d((2, 2))
  32. >>> inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
  33. >>> oup = m(inp)
  34. >>> oup.numpy()
  35. array([[[[ 5., 7.],
  36. [13., 15.]]]], dtype=float32)
  37. """
  38. def forward(self, inp):
  39. return adaptive_max_pool2d(inp, self.oshp)
  40. class AdaptiveAvgPool2d(_AdaptivePoolNd):
  41. r"""Applies a 2D average pooling over an input.
  42. For instance, given an input of the size :math:`(N, C, H, W)` and
  43. an output shape :math:`(OH, OW)`, this layer generates the output of
  44. the size :math:`(N, C, OH, OW)` through a process described as:
  45. .. math::
  46. out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
  47. input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
  48. ``kernel_size`` and ``stride`` can be inferred from input shape and out shape:
  49. * padding: (0, 0)
  50. * stride: (floor(IH / OH), floor(IW / OW))
  51. * kernel_size: (IH - (OH - 1) * stride_h, IW - (OW - 1) * stride_w)
  52. Examples:
  53. >>> import numpy as np
  54. >>> m = M.AdaptiveAvgPool2d((2, 2))
  55. >>> inp = mge.tensor(np.arange(0, 16).astype("float32").reshape(1, 1, 4, 4))
  56. >>> oup = m(inp)
  57. >>> oup.numpy()
  58. array([[[[ 2.5, 4.5],
  59. [10.5, 12.5]]]], dtype=float32)
  60. """
  61. def forward(self, inp):
  62. return adaptive_avg_pool2d(inp, self.oshp)