
lrn.py

# -*- coding: utf-8 -*-
from typing import Tuple, Union

from ..functional import local_response_norm
from .module import Module


class LocalResponseNorm(Module):
    r"""
    Apply local response normalization to the input tensor.

    Args:
        kernel_size: the size of the kernel to apply LRN on.
        k: hyperparameter k. The default value is 2.0.
        alpha: hyperparameter alpha. The default value is 1e-4.
        beta: hyperparameter beta. The default value is 0.75.

    Example:
        >>> import numpy as np
        >>> from megengine import Tensor
        >>> import megengine.module as M
        >>> inp = Tensor(np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5))
        >>> GT = np.array([[[[ 0.,        0.999925,  1.9994003, 2.9979765, 3.9952066],
        ...                  [ 4.9906454, 5.983851,  6.974385,  7.961814,  8.945709 ],
        ...                  [ 9.925651, 10.90122,  11.872011, 12.837625, 13.7976675],
        ...                  [14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
        ...                  [19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])
        >>> op = M.LocalResponseNorm(kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
        >>> out = op(inp)
        >>> np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
    """

    def __init__(
        self,
        kernel_size: int = 5,
        k: float = 2.0,
        alpha: float = 1e-4,
        beta: float = 0.75,
        **kwargs
    ):
        super(LocalResponseNorm, self).__init__(**kwargs)
        self.kernel_size = kernel_size
        self.k = k
        self.alpha = alpha
        self.beta = beta

    def forward(self, inp):
        # Delegate to the functional implementation with the stored hyperparameters.
        return local_response_norm(inp, self.kernel_size, self.k, self.alpha, self.beta)
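For reference, the per-element behaviour implied by the doctest values can be reproduced with plain NumPy. The sketch below assumes the normalization is out = inp / (k + alpha * sum of squared inputs over a kernel_size window across channels) ** beta with zero padding, which matches the expected values in the docstring; the helper name lrn_reference is hypothetical and only serves as a sanity check, not as MegEngine's actual implementation.

# A minimal NumPy sketch of cross-channel LRN, assuming the formula
# out = inp / (k + alpha * sum_{window} inp**2) ** beta with zero padding
# along the channel axis. `lrn_reference` is a hypothetical helper used
# only to check the docstring values, not MegEngine's implementation.
import numpy as np

def lrn_reference(inp, kernel_size=5, k=2.0, alpha=1e-4, beta=0.75):
    n, c, h, w = inp.shape
    half = kernel_size // 2
    # Zero-pad along the channel axis so every channel has a full window.
    padded = np.pad(inp ** 2, ((0, 0), (half, half), (0, 0), (0, 0)))
    out = np.empty_like(inp)
    for ch in range(c):
        # Sum of squares over the kernel_size neighbouring channels.
        sq_sum = padded[:, ch : ch + kernel_size].sum(axis=1)
        out[:, ch] = inp[:, ch] / (k + alpha * sq_sum) ** beta
    return out

inp = np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5)
print(lrn_reference(inp, kernel_size=3, k=1.0, alpha=1e-4, beta=0.75))
# The first row should be close to [0., 0.999925, 1.9994003, 2.9979765, 3.9952066],
# i.e. the GT values used in the docstring example above.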