You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

linear.py 2.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. import numpy as np
  9. from ..functional.nn import linear
  10. from ..tensor import Parameter
  11. from . import init
  12. from .module import Module
  13. class Linear(Module):
  14. r"""Applies a linear transformation to the input. For instance, if input
  15. is x, then output y is:
  16. .. math::
  17. y = xW^T + b
  18. where :math:`y_i= \sum_j W_{ij} x_j + b_i`
  19. Args:
  20. in_features: size of each input sample.
  21. out_features: size of each output sample.
  22. bias: if it's ``False``, the layer will not learn an additional ``bias``.
  23. Default: ``True``
  24. Examples:
  25. >>> import numpy as np
  26. >>> m = M.Linear(in_features=3, out_features=1)
  27. >>> inp = mge.tensor(np.arange(0, 6).astype("float32").reshape(2, 3))
  28. >>> oup = m(inp)
  29. >>> oup.numpy().shape
  30. (2, 1)
  31. """
  32. def __init__(
  33. self,
  34. in_features: int,
  35. out_features: int,
  36. bias: bool = True,
  37. compute_mode: str = "default",
  38. **kwargs
  39. ):
  40. super().__init__(**kwargs)
  41. self.out_features = out_features
  42. self.in_features = in_features
  43. w_shape = (out_features, in_features)
  44. self.weight = Parameter(np.zeros(w_shape, dtype=np.float32))
  45. self.bias = None
  46. if bias:
  47. b_shape = (out_features,)
  48. self.bias = Parameter(np.zeros(b_shape, dtype=np.float32))
  49. self.compute_mode = compute_mode
  50. self.reset_parameters()
  51. def _get_fanin(self):
  52. return self.in_features
  53. def reset_parameters(self) -> None:
  54. fanin = self._get_fanin()
  55. std = np.sqrt(1 / fanin)
  56. init.normal_(self.weight, 0.0, std)
  57. if self.bias is not None:
  58. init.zeros_(self.bias)
  59. def _calc_linear(self, x, weight, bias):
  60. return linear(x, weight, bias, compute_mode=self.compute_mode)
  61. def forward(self, x):
  62. return self._calc_linear(x, self.weight, self.bias)
  63. def _module_info_string(self) -> str:
  64. return "in_features={}, out_features={}, bias={}".format(
  65. self.in_features, self.out_features, self.bias is not None
  66. )