You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

linear.py 2.6 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. import numpy as np
  9. from ..functional.nn import linear
  10. from ..tensor import Parameter
  11. from . import init
  12. from .module import Module
  13. class Linear(Module):
  14. r"""Applies a linear transformation to the input. For instance, if input
  15. is x, then output y is:
  16. .. math::
  17. y = xW^T + b
  18. where :math:`y_i= \sum_j W_{ij} x_j + b_i`
  19. Args:
  20. in_features: size of each input sample.
  21. out_features: size of each output sample.
  22. bias: if it's ``False``, the layer will not learn an additional ``bias``.
  23. Default: ``True``
  24. Examples:
  25. .. testcode::
  26. import numpy as np
  27. import megengine as mge
  28. import megengine.module as M
  29. m = M.Linear(in_features=3, out_features=1)
  30. inp = mge.tensor(np.arange(0, 6).astype("float32").reshape(2, 3))
  31. oup = m(inp)
  32. print(oup.numpy().shape)
  33. Outputs:
  34. .. testoutput::
  35. (2, 1)
  36. """
  37. def __init__(
  38. self,
  39. in_features: int,
  40. out_features: int,
  41. bias: bool = True,
  42. compute_mode: str = "default",
  43. **kwargs
  44. ):
  45. super().__init__(**kwargs)
  46. self.out_features = out_features
  47. self.in_features = in_features
  48. w_shape = (out_features, in_features)
  49. self.weight = Parameter(np.zeros(w_shape, dtype=np.float32))
  50. self.bias = None
  51. if bias:
  52. b_shape = (out_features,)
  53. self.bias = Parameter(np.zeros(b_shape, dtype=np.float32))
  54. self.compute_mode = compute_mode
  55. self.reset_parameters()
  56. def _get_fanin(self):
  57. return self.in_features
  58. def reset_parameters(self) -> None:
  59. fanin = self._get_fanin()
  60. std = np.sqrt(1 / fanin)
  61. init.normal_(self.weight, 0.0, std)
  62. if self.bias is not None:
  63. init.zeros_(self.bias)
  64. def _calc_linear(self, x, weight, bias):
  65. return linear(x, weight, bias, compute_mode=self.compute_mode)
  66. def forward(self, x):
  67. return self._calc_linear(x, self.weight, self.bias)
  68. def _module_info_string(self) -> str:
  69. return "in_features={}, out_features={}, bias={}".format(
  70. self.in_features, self.out_features, self.bias is not None
  71. )