You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

module_stats.py 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. import contextlib
  9. from functools import partial
  10. import numpy as np
  11. import tabulate
  12. import megengine as mge
  13. import megengine.module as m
  14. import megengine.module.qat as qatm
  15. import megengine.module.quantized as qm
  16. from megengine.core.tensor.dtype import get_dtype_bit
  17. from megengine.functional.tensor import zeros
  18. try:
  19. mge.logger.MegEngineLogFormatter.max_lines = float("inf")
  20. except AttributeError as e:
  21. raise ValueError("set logger max lines failed")
  22. logger = mge.get_logger(__name__)
  23. logger.setLevel("INFO")
# Registry mapping module types -> FLOPs-counting implementations
# (populated via the ``register_flops`` decorator below).
_calc_flops_dict = {}
# Registry mapping module types -> receptive-field implementations
# (populated via the ``register_receptive_field`` decorator below).
_calc_receptive_field_dict = {}
  26. def _receptive_field_fallback(module, inputs, outputs):
  27. if not _receptive_field_enabled:
  28. return
  29. assert not hasattr(module, "_rf")
  30. assert not hasattr(module, "_stride")
  31. if len(inputs) == 0:
  32. # TODO: support other dimension
  33. module._rf = (1, 1)
  34. module._stride = (1, 1)
  35. return module._rf, module._stride
  36. rf, stride = preprocess_receptive_field(module, inputs, outputs)
  37. module._rf = rf
  38. module._stride = stride
  39. return rf, stride
# Each entry is (stat key or tuple of keys, impl registry dict, fallback impl).
# ``get_op_stats`` walks this list to decide which statistics to compute.
_iter_list = [
    ("flops_num", _calc_flops_dict, None),
    (
        ("receptive_field", "stride"),
        _calc_receptive_field_dict,
        _receptive_field_fallback,
    ),
]

# Global switch read by ``_receptive_field_fallback``; toggled through
# ``enable_receptive_field`` / ``disable_receptive_field``.
_receptive_field_enabled = False
  50. def _register_dict(*modules, dict=None):
  51. def callback(impl):
  52. for module in modules:
  53. dict[module] = impl
  54. return impl
  55. return callback
def register_flops(*modules):
    """Decorator: register a FLOPs-counting impl for the given module types."""
    return _register_dict(*modules, dict=_calc_flops_dict)
def register_receptive_field(*modules):
    """Decorator: register a receptive-field impl for the given module types."""
    return _register_dict(*modules, dict=_calc_receptive_field_dict)
def enable_receptive_field():
    """Turn on receptive-field tracking for subsequent stats collection."""
    global _receptive_field_enabled
    _receptive_field_enabled = True
def disable_receptive_field():
    """Turn off receptive-field tracking."""
    global _receptive_field_enabled
    _receptive_field_enabled = False
  66. @register_flops(
  67. m.Conv1d, m.Conv2d, m.Conv3d, m.ConvTranspose2d, m.LocalConv2d, m.DeformableConv2d
  68. )
  69. def flops_convNd(module: m.Conv2d, inputs, outputs):
  70. bias = 1 if module.bias is not None else 0
  71. # N x Cout x H x W x (Cin x Kw x Kh + bias)
  72. return np.prod(outputs[0].shape) * (
  73. module.in_channels // module.groups * np.prod(module.kernel_size) + bias
  74. )
  75. @register_flops(m.Linear)
  76. def flops_linear(module: m.Linear, inputs, outputs):
  77. bias = module.out_features if module.bias is not None else 0
  78. return np.prod(outputs[0].shape) * module.in_features + bias
  79. @register_flops(m.BatchMatMulActivation)
  80. def flops_batchmatmul(module: m.BatchMatMulActivation, inputs, outputs):
  81. bias = 1 if module.bias is not None else 0
  82. x = inputs[0]
  83. w = module.weight
  84. batch_size = x.shape[0]
  85. n, p = x.shape[1:]
  86. _, m = w.shape[1:]
  87. return n * (p + bias) * m * batch_size
# Module types that get a forward hook attached by ``module_stats``.
# Registering only the float base classes suffices: qat and quantized
# variants inherit from the float modules, so isinstance checks still match.
hook_modules = (
    m.conv._ConvNd,
    m.Linear,
    m.BatchMatMulActivation,
)
  94. def dict2table(list_of_dict, header):
  95. table_data = [header]
  96. for d in list_of_dict:
  97. row = []
  98. for h in header:
  99. v = ""
  100. if h in d:
  101. v = d[h]
  102. row.append(v)
  103. table_data.append(row)
  104. return table_data
  105. def sizeof_fmt(num, suffix="B"):
  106. for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
  107. if abs(num) < 1024.0:
  108. return "{:3.3f} {}{}".format(num, unit, suffix)
  109. num /= 1024.0
  110. sign_str = "-" if num < 0 else ""
  111. return "{}{:.1f} {}{}".format(sign_str, num, "Yi", suffix)
  112. def preprocess_receptive_field(module, inputs, outputs):
  113. # TODO: support other dimensions
  114. pre_rf = (
  115. max(getattr(i.owner, "_rf", (1, 1))[0] for i in inputs),
  116. max(getattr(i.owner, "_rf", (1, 1))[1] for i in inputs),
  117. )
  118. pre_stride = (
  119. max(getattr(i.owner, "_stride", (1, 1))[0] for i in inputs),
  120. max(getattr(i.owner, "_stride", (1, 1))[1] for i in inputs),
  121. )
  122. return pre_rf, pre_stride
  123. def get_op_stats(module, inputs, outputs):
  124. rst = {
  125. "input_shapes": [i.shape for i in inputs],
  126. "output_shapes": [o.shape for o in outputs],
  127. }
  128. valid_flag = False
  129. for key, _dict, fallback in _iter_list:
  130. for _type in _dict:
  131. if isinstance(module, _type):
  132. value = _dict[_type](module, inputs, outputs)
  133. valid_flag = True
  134. break
  135. else:
  136. if fallback is not None:
  137. value = fallback(module, inputs, outputs)
  138. continue
  139. if isinstance(key, tuple):
  140. assert isinstance(value, tuple)
  141. for k, v in zip(key, value):
  142. rst[k] = v
  143. else:
  144. rst[key] = value
  145. if valid_flag:
  146. return rst
  147. else:
  148. return None
  149. return
  150. def print_op_stats(flops, bar_length_max=20):
  151. max_flops_num = max([i["flops_num"] for i in flops] + [0])
  152. total_flops_num = 0
  153. for d in flops:
  154. total_flops_num += int(d["flops_num"])
  155. d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
  156. for d in flops:
  157. ratio = d["ratio"] = d["flops_num"] / total_flops_num
  158. d["percentage"] = "{:.2f}%".format(ratio * 100)
  159. bar_length = int(d["flops_num"] / max_flops_num * bar_length_max)
  160. d["bar"] = "#" * bar_length
  161. d["flops"] = sizeof_fmt(d["flops_num"], suffix="OPs")
  162. header = [
  163. "name",
  164. "class_name",
  165. "input_shapes",
  166. "output_shapes",
  167. "flops",
  168. "flops_cum",
  169. "percentage",
  170. "bar",
  171. ]
  172. if _receptive_field_enabled:
  173. header.insert(4, "receptive_field")
  174. header.insert(5, "stride")
  175. total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
  176. total_var_size = sum(
  177. sum(s[1] if len(s) > 1 else 0 for s in d["output_shapes"]) for d in flops
  178. )
  179. flops.append(
  180. dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
  181. )
  182. logger.info("flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header)))
  183. return total_flops_num
  184. def get_param_stats(param: np.ndarray):
  185. nbits = get_dtype_bit(param.dtype.name)
  186. shape = param.shape
  187. param_dim = np.prod(param.shape)
  188. param_size = param_dim * nbits // 8
  189. return {
  190. "dtype": param.dtype,
  191. "shape": shape,
  192. "mean": "{:.3g}".format(param.mean()),
  193. "std": "{:.3g}".format(param.std()),
  194. "param_dim": param_dim,
  195. "nbits": nbits,
  196. "size": param_size,
  197. }
  198. def print_param_stats(params, bar_length_max=20):
  199. max_size = max([d["size"] for d in params] + [0])
  200. total_param_dims, total_param_size = 0, 0
  201. for d in params:
  202. total_param_dims += int(d["param_dim"])
  203. total_param_size += int(d["size"])
  204. d["size_cum"] = sizeof_fmt(total_param_size)
  205. for d in params:
  206. ratio = d["size"] / total_param_size
  207. d["ratio"] = ratio
  208. d["percentage"] = "{:.2f}%".format(ratio * 100)
  209. bar_length = int(d["size"] / max_size * bar_length_max)
  210. d["size_bar"] = "#" * bar_length
  211. d["size"] = sizeof_fmt(d["size"])
  212. param_size = sizeof_fmt(total_param_size)
  213. params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
  214. header = [
  215. "name",
  216. "dtype",
  217. "shape",
  218. "mean",
  219. "std",
  220. "param_dim",
  221. "bits",
  222. "size",
  223. "size_cum",
  224. "percentage",
  225. "size_bar",
  226. ]
  227. logger.info(
  228. "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
  229. )
  230. return total_param_dims, total_param_size
  231. def print_summary(**kwargs):
  232. data = [["item", "value"]]
  233. data.extend(list(kwargs.items()))
  234. logger.info("summary\n" + tabulate.tabulate(data))
def module_stats(
    model: m.Module,
    input_size: tuple,  # annotation fixed: a shape tuple (or list of them), not int
    bar_length_max: int = 20,
    log_params: bool = True,
    log_flops: bool = True,
):
    r"""
    Calculate and print ``model``'s statistics by adding hooks that record each
    hooked Module's input/output sizes during one dummy forward pass.

    :param model: model that need to get stats info.
    :param input_size: size of input for running model and calculating stats;
        either a single shape tuple or a list of shape tuples for multi-input
        models.
    :param bar_length_max: size of bar indicating max flops or parameter size in net stats.
    :param log_params: whether print and record params size.
    :param log_flops: whether print and record op flops.
    :return: (total parameter size in bytes, total flops).
    """
    disable_receptive_field()

    def module_stats_hook(module, inputs, outputs, name=""):
        # Forward hook: records op flops stats plus weight/bias param stats
        # into the enclosing ``flops`` / ``params`` lists.
        class_name = str(module.__class__).split(".")[-1].split("'")[0]
        flops_stats = get_op_stats(module, inputs, outputs)
        if flops_stats is not None:
            # a module might not have these flops attributes (no impl matched)
            flops_stats["name"] = name
            flops_stats["class_name"] = class_name
            flops.append(flops_stats)
        if hasattr(module, "weight") and module.weight is not None:
            w = module.weight
            param_stats = get_param_stats(w.numpy())
            param_stats["name"] = name + "-w"
            params.append(param_stats)
        if hasattr(module, "bias") and module.bias is not None:
            b = module.bias
            param_stats = get_param_stats(b.numpy())
            param_stats["name"] = name + "-b"
            params.append(param_stats)

    @contextlib.contextmanager
    def adjust_stats(module, training=False):
        """Adjust module to training/eval mode temporarily.

        Args:
            module (M.Module): used module.
            training (bool): training mode. True for train mode, False for eval mode.
        """

        def recursive_backup_stats(module, mode):
            # NOTE: the loop variable ``m`` shadows the module-level
            # ``megengine.module as m`` alias inside this helper.
            for m in module.modules():
                # save prev status to _prev_training
                m._prev_training = m.training
                m.train(mode, recursive=False)

        def recursive_recover_stats(module):
            for m in module.modules():
                # recover prev status and delete attribute
                m.training = m._prev_training
                delattr(m, "_prev_training")

        recursive_backup_stats(module, mode=training)
        yield module
        recursive_recover_stats(module)

    # multiple inputs to the network
    if not isinstance(input_size[0], tuple):
        input_size = [input_size]
    params = []
    flops = []
    hooks = []
    # Attach the stats hook to every module whose type we know how to count.
    for (name, module) in model.named_modules():
        if isinstance(module, hook_modules):
            hooks.append(
                module.register_forward_hook(partial(module_stats_hook, name=name))
            )
    # One dummy forward pass in eval mode triggers all hooks.
    inputs = [zeros(in_size, dtype=np.float32) for in_size in input_size]
    with adjust_stats(model, training=False) as model:
        model(*inputs)
    # Detach hooks so the model is left unmodified.
    for h in hooks:
        h.remove()
    extra_info = {
        "#params": len(params),
    }
    total_flops, total_param_dims, total_param_size = 0, 0, 0
    if log_params:
        total_param_dims, total_param_size = print_param_stats(params, bar_length_max)
        extra_info["total_param_dims"] = sizeof_fmt(total_param_dims)
        extra_info["total_param_size"] = sizeof_fmt(total_param_size)
    if log_flops:
        total_flops = print_op_stats(flops, bar_length_max)
        extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
    if log_params and log_flops:
        extra_info["flops/param_size"] = "{:3.3f}".format(
            total_flops / total_param_size
        )
    print_summary(**extra_info)
    return total_param_size, total_flops

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台