
fix(mge/module): fix quantized fold weight value range limit for fused conv/bn modules

Branch: release-0.6
Author: Megvii Engine Team (4 years ago)
Commit: b2f05bf8d8
GitOrigin-RevId: 007c2f13b6
1 changed file with 1 addition and 0 deletions
python_module/megengine/module/qat/conv_bn.py (+1, -0)

@@ -62,6 +62,7 @@ class _ConvBnActivation2d(Float._ConvBnActivation2d, QATModule):
             self.conv.groups, -1, 1, 1, 1
         )
 
+        w_fold = self.apply_quant_weight(w_fold)
         # b_fold = gamma * (b - bn_mean) / bn_std + beta
         b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
         return w_fold, b_fold

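Note on the fix: folding batch norm into the convolution gives w_fold = gamma * w * bn_istd and b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd; the one added line passes the folded weight through the module's weight fake-quantizer, so the quantized value-range limit is enforced on the weight actually used after folding rather than on the raw conv weight. The sketch below illustrates that idea in plain NumPy; the helpers fake_quant_weight and fold_conv_bn and the symmetric 8-bit scheme are assumptions for illustration, not MegEngine's actual implementation.

import numpy as np

def fake_quant_weight(w, num_bits=8):
    # Symmetric per-tensor fake quantization (illustrative, not MegEngine code):
    # round/clamp to the integer range, then map back to float, which limits
    # the weight's value range.
    qmax = 2 ** (num_bits - 1) - 1  # 127 for 8-bit
    max_abs = float(np.abs(w).max())
    scale = max_abs / qmax if max_abs > 0 else 1.0
    q = np.clip(np.round(w / scale), -qmax - 1, qmax)
    return q * scale

def fold_conv_bn(w, conv_bias, gamma, beta, bn_mean, bn_var, eps=1e-5):
    bn_istd = 1.0 / np.sqrt(bn_var + eps)
    # w_fold = gamma * w / bn_std, broadcast over the output-channel axis
    w_fold = w * (gamma * bn_istd).reshape(-1, 1, 1, 1)
    # b_fold = gamma * (b - bn_mean) / bn_std + beta  (same formula as in the diff)
    b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
    # The point of this commit: fake-quantize the folded weight, not the raw one.
    return fake_quant_weight(w_fold), b_fold

# Toy usage with hypothetical shapes: 16 output channels, 3x3 kernels.
w = np.random.randn(16, 3, 3, 3).astype(np.float32)  # OIHW conv weight
conv_bias = np.zeros(16, dtype=np.float32)
gamma, beta = np.ones(16, dtype=np.float32), np.zeros(16, dtype=np.float32)
bn_mean, bn_var = np.zeros(16, dtype=np.float32), np.ones(16, dtype=np.float32)
w_fold, b_fold = fold_conv_bn(w, conv_bias, gamma, beta, bn_mean, bn_var)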
