Browse Source

feat(mge): do not export F.nn.linear

GitOrigin-RevId: 589964729d
release-1.1
Megvii Engine Team 4 years ago
parent
commit
164d9cdf1f
3 changed files with 2 additions and 3 deletions
  1. +0
    -1
      imperative/python/megengine/functional/nn.py
  2. +1
    -1
      imperative/python/megengine/module/linear.py
  3. +1
    -1
      imperative/python/megengine/module/quantized/linear.py

+ 0
- 1
imperative/python/megengine/functional/nn.py View File

@@ -37,7 +37,6 @@ __all__ = [
"dropout", "dropout",
"indexing_one_hot", "indexing_one_hot",
"leaky_relu", "leaky_relu",
"linear",
"local_conv2d", "local_conv2d",
"logsigmoid", "logsigmoid",
"logsumexp", "logsumexp",


+ 1
- 1
imperative/python/megengine/module/linear.py View File

@@ -7,7 +7,7 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np import numpy as np


from ..functional import linear
from ..functional.nn import linear
from ..tensor import Parameter from ..tensor import Parameter
from . import init from . import init
from .module import Module from .module import Module


+ 1
- 1
imperative/python/megengine/module/quantized/linear.py View File

@@ -31,7 +31,7 @@ class Linear(QuantizedModule):
inp_scale = dtype.get_scale(inp.dtype) inp_scale = dtype.get_scale(inp.dtype)
w_scale = dtype.get_scale(self.weight.dtype) w_scale = dtype.get_scale(self.weight.dtype)
bias_dtype = dtype.qint32(inp_scale * w_scale) bias_dtype = dtype.qint32(inp_scale * w_scale)
return F.linear(
return F.nn.linear(
inp, inp,
self.weight, self.weight,
None if self.bias is None else self.bias.astype(bias_dtype), None if self.bias is None else self.bias.astype(bias_dtype),


Loading…
Cancel
Save