
lrn.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Tuple, Union

from ..functional import local_response_norm
from .module import Module


class LocalResponseNorm(Module):
    r"""
    Apply local response normalization to the input tensor.

    Args:
        kernel_size: the size of the kernel to apply LRN on.
        k: hyperparameter k. The default value is 2.0.
        alpha: hyperparameter alpha. The default value is 1e-4.
        beta: hyperparameter beta. The default value is 0.75.

    Example:
        >>> import numpy as np
        >>> from megengine import Tensor
        >>> import megengine.module as M
        >>> inp = Tensor(np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5))
        >>> GT = np.array([[[[ 0.,        0.999925,  1.9994003, 2.9979765, 3.9952066],
        ...                  [ 4.9906454, 5.983851,  6.974385,  7.961814,  8.945709 ],
        ...                  [ 9.925651, 10.90122,  11.872011, 12.837625, 13.7976675],
        ...                  [14.751757, 15.699524, 16.640602, 17.574642, 18.501305 ],
        ...                  [19.420258, 20.331186, 21.233786, 22.127764, 23.012836 ]]]])
        >>> op = M.LocalResponseNorm(kernel_size=3, k=1.0, alpha=1e-4, beta=0.75)
        >>> out = op(inp)
        >>> np.testing.assert_allclose(GT, out.numpy(), rtol=1e-6, atol=1e-6)
    """

    def __init__(
        self,
        kernel_size: int = 5,
        k: float = 2.0,
        alpha: float = 1e-4,
        beta: float = 0.75,
        **kwargs
    ):
        super(LocalResponseNorm, self).__init__(**kwargs)
        self.kernel_size = kernel_size
        self.k = k
        self.alpha = alpha
        self.beta = beta

    def forward(self, inp):
        # Delegate to the functional implementation with the stored hyperparameters.
        return local_response_norm(inp, self.kernel_size, self.k, self.alpha, self.beta)
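
For reference, the numbers in the docstring example are consistent with the cross-channel LRN formula b_c = a_c / (k + alpha * sum of a_{c'}^2 over a window of neighbouring channels)^beta. The sketch below is a minimal NumPy re-implementation written under that assumption; it is not part of MegEngine, and the helper name lrn_reference as well as the exact clipping of the window at the channel boundaries are assumptions made for illustration only.

import numpy as np

def lrn_reference(x, kernel_size=5, k=2.0, alpha=1e-4, beta=0.75):
    """Plain NumPy sketch of cross-channel LRN on an (N, C, H, W) array."""
    _, channels, _, _ = x.shape
    half = kernel_size // 2
    squared = x ** 2
    out = np.empty_like(x)
    for c in range(channels):
        # Sum of squares over the window of neighbouring channels,
        # clipped at the channel boundaries (assumed behaviour).
        lo, hi = max(0, c - half), min(channels, c + half + 1)
        denom = (k + alpha * squared[:, lo:hi].sum(axis=1)) ** beta
        out[:, c] = x[:, c] / denom
    return out

# Reproduces the first row of GT from the docstring example
# (single channel, kernel_size=3, k=1.0):
inp = np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5)
print(lrn_reference(inp, kernel_size=3, k=1.0)[0, 0, 0])
# ~[0., 0.999925, 1.9994003, 2.9979765, 3.9952066]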