You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

module_stats.py 17 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. from collections import Iterable, namedtuple
  9. from functools import partial
  10. from typing import Iterable
  11. import numpy as np
  12. import tabulate
  13. import megengine as mge
  14. import megengine.module as m
  15. import megengine.module.qat as qatm
  16. import megengine.module.quantized as qm
  17. from megengine import Tensor
  18. from megengine import functional as F
  19. from megengine.core.tensor.dtype import get_dtype_bit
  20. from megengine.functional.tensor import zeros
  21. from megengine.tensor import Tensor
  22. from .module_utils import set_module_mode_safe
# Lift the log formatter's line cap so long stats tables are not truncated.
try:
    mge.logger.MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
    # Formatter API changed upstream: fail loudly rather than print truncated tables.
    raise ValueError("set logger max lines failed")
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
# Registries mapping module classes to stats implementations,
# populated by the register_flops / register_receptive_field decorators below.
_calc_flops_dict = {}
_calc_receptive_field_dict = {}
  31. def _receptive_field_fallback(module, inputs, outputs):
  32. if not _receptive_field_enabled:
  33. return
  34. assert not hasattr(module, "_rf")
  35. assert not hasattr(module, "_stride")
  36. if len(inputs) == 0:
  37. # TODO: support other dimension
  38. module._rf = (1, 1)
  39. module._stride = (1, 1)
  40. return module._rf, module._stride
  41. rf, stride = preprocess_receptive_field(module, inputs, outputs)
  42. module._rf = rf
  43. module._stride = stride
  44. return rf, stride
# Statistics walked by get_op_stats.  Each entry is
# (result key or tuple of keys, implementation registry, fallback callable).
_iter_list = [
    ("flops_num", _calc_flops_dict, None),
    (
        ("receptive_field", "stride"),
        _calc_receptive_field_dict,
        _receptive_field_fallback,
    ),
]
# Toggled by enable_receptive_field() / disable_receptive_field().
_receptive_field_enabled = False
  55. def _register_dict(*modules, dict=None):
  56. def callback(impl):
  57. for module in modules:
  58. dict[module] = impl
  59. return impl
  60. return callback
  61. def register_flops(*modules):
  62. return _register_dict(*modules, dict=_calc_flops_dict)
  63. def register_receptive_field(*modules):
  64. return _register_dict(*modules, dict=_calc_receptive_field_dict)
  65. def enable_receptive_field():
  66. global _receptive_field_enabled
  67. _receptive_field_enabled = True
  68. def disable_receptive_field():
  69. global _receptive_field_enabled
  70. _receptive_field_enabled = False
  71. @register_flops(
  72. m.Conv1d, m.Conv2d, m.Conv3d, m.ConvTranspose2d, m.LocalConv2d, m.DeformableConv2d
  73. )
  74. def flops_convNd(module: m.Conv2d, inputs, outputs):
  75. bias = 1 if module.bias is not None else 0
  76. # N x Cout x H x W x (Cin x Kw x Kh + bias)
  77. return np.prod(outputs[0].shape) * (
  78. module.in_channels // module.groups * np.prod(module.kernel_size) + bias
  79. )
  80. @register_flops(
  81. m.batchnorm._BatchNorm, m.SyncBatchNorm, m.GroupNorm, m.LayerNorm, m.InstanceNorm,
  82. )
  83. def flops_norm(module: m.Linear, inputs, outputs):
  84. return np.prod(inputs[0].shape) * 7
  85. @register_flops(m.AvgPool2d, m.MaxPool2d)
  86. def flops_pool(module: m.AvgPool2d, inputs, outputs):
  87. return np.prod(outputs[0].shape) * (module.kernel_size ** 2)
  88. @register_flops(m.AdaptiveAvgPool2d, m.AdaptiveMaxPool2d)
  89. def flops_adaptivePool(module: m.AdaptiveAvgPool2d, inputs, outputs):
  90. stride_h = np.floor(inputs[0].shape[2] / (inputs[0].shape[2] - 1))
  91. kernel_h = inputs[0].shape[2] - (inputs[0].shape[2] - 1) * stride_h
  92. stride_w = np.floor(inputs[0].shape[3] / (inputs[0].shape[3] - 1))
  93. kernel_w = inputs[0].shape[3] - (inputs[0].shape[3] - 1) * stride_w
  94. return np.prod(outputs[0].shape) * kernel_h * kernel_w
  95. @register_flops(m.Linear)
  96. def flops_linear(module: m.Linear, inputs, outputs):
  97. bias = module.out_features if module.bias is not None else 0
  98. return np.prod(outputs[0].shape) * module.in_features + bias
  99. @register_flops(m.BatchMatMulActivation)
  100. def flops_batchmatmul(module: m.BatchMatMulActivation, inputs, outputs):
  101. bias = 1 if module.bias is not None else 0
  102. x = inputs[0]
  103. w = module.weight
  104. batch_size = x.shape[0]
  105. n, p = x.shape[1:]
  106. _, m = w.shape[1:]
  107. return n * (p + bias) * m * batch_size
# Module classes that module_stats() attaches a forward hook to.
# QAT and quantized variants need not be listed: they inherit from these
# float modules, so isinstance() matches them too.
hook_modules = (
    m.conv._ConvNd,
    m.Linear,
    m.BatchMatMulActivation,
    m.batchnorm._BatchNorm,
    m.LayerNorm,
    m.GroupNorm,
    m.InstanceNorm,
    m.pooling._PoolNd,
    m.adaptive_pooling._AdaptivePoolNd,
)
  120. def _mean(inp):
  121. inp = mge.tensor(inp)
  122. return F.mean(inp).numpy()
  123. def _std(inp):
  124. inp = mge.tensor(inp)
  125. return F.std(inp).numpy()
  126. def dict2table(list_of_dict, header):
  127. table_data = [header]
  128. for d in list_of_dict:
  129. row = []
  130. for h in header:
  131. v = ""
  132. if h in d:
  133. v = d[h]
  134. row.append(v)
  135. table_data.append(row)
  136. return table_data
  137. def sizeof_fmt(num, suffix="B"):
  138. if suffix == "B":
  139. scale = 1024.0
  140. units = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"]
  141. else:
  142. scale = 1000.0
  143. units = ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]
  144. for unit in units:
  145. if abs(num) < scale or unit == units[-1]:
  146. return "{:3.3f} {}{}".format(num, unit, suffix)
  147. num /= scale
  148. def preprocess_receptive_field(module, inputs, outputs):
  149. # TODO: support other dimensions
  150. pre_rf = (
  151. max(getattr(i.owner, "_rf", (1, 1))[0] for i in inputs),
  152. max(getattr(i.owner, "_rf", (1, 1))[1] for i in inputs),
  153. )
  154. pre_stride = (
  155. max(getattr(i.owner, "_stride", (1, 1))[0] for i in inputs),
  156. max(getattr(i.owner, "_stride", (1, 1))[1] for i in inputs),
  157. )
  158. return pre_rf, pre_stride
  159. def get_op_stats(module, inputs, outputs):
  160. if not isinstance(outputs, tuple) and not isinstance(outputs, list):
  161. outputs = (outputs,)
  162. rst = {
  163. "input_shapes": [i.shape for i in inputs],
  164. "output_shapes": [o.shape for o in outputs],
  165. }
  166. valid_flag = False
  167. for key, _dict, fallback in _iter_list:
  168. for _type in _dict:
  169. if isinstance(module, _type):
  170. value = _dict[_type](module, inputs, outputs)
  171. valid_flag = True
  172. break
  173. else:
  174. if fallback is not None:
  175. value = fallback(module, inputs, outputs)
  176. continue
  177. if isinstance(key, tuple):
  178. assert isinstance(value, tuple)
  179. for k, v in zip(key, value):
  180. rst[k] = v
  181. else:
  182. rst[key] = value
  183. if valid_flag:
  184. return rst
  185. else:
  186. return None
  187. return
  188. def sum_op_stats(flops, bar_length_max=20):
  189. max_flops_num = max([i["flops_num"] for i in flops] + [0])
  190. total_flops_num = 0
  191. for d in flops:
  192. total_flops_num += int(d["flops_num"])
  193. d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
  194. for d in flops:
  195. ratio = d["ratio"] = d["flops_num"] / total_flops_num
  196. d["percentage"] = "{:.2f}%".format(ratio * 100)
  197. bar_length = int(d["flops_num"] / max_flops_num * bar_length_max)
  198. d["bar"] = "#" * bar_length
  199. d["flops"] = sizeof_fmt(d["flops_num"], suffix="OPs")
  200. total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
  201. total_var_size = sum(
  202. sum(s[1] if len(s) > 1 else 0 for s in d["output_shapes"]) for d in flops
  203. )
  204. flops.append(
  205. dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
  206. )
  207. return total_flops_num, flops
  208. def print_op_stats(flops):
  209. header = [
  210. "name",
  211. "class_name",
  212. "input_shapes",
  213. "output_shapes",
  214. "flops",
  215. "flops_cum",
  216. "percentage",
  217. "bar",
  218. ]
  219. if _receptive_field_enabled:
  220. header.insert(4, "receptive_field")
  221. header.insert(5, "stride")
  222. logger.info("flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header)))
def get_param_stats(param: np.ndarray):
    """Collect dtype/shape/statistics/size info for one parameter.

    *param* is the parameter's value as a numpy array — the hook in
    ``module_stats`` passes ``Tensor.numpy()`` (annotation corrected from
    ``Tensor`` accordingly).
    """
    nbits = get_dtype_bit(np.dtype(param.dtype).name)  # bits per element
    shape = param.shape
    param_dim = np.prod(param.shape)  # total element count
    # Size in bytes; integer division rounds sub-byte dtypes down.
    param_size = param_dim * nbits // 8
    return {
        "dtype": np.dtype(param.dtype),
        "shape": shape,
        "mean": "{:.3g}".format(_mean(param)),
        "std": "{:.3g}".format(_std(param)),
        "param_dim": param_dim,
        "nbits": nbits,
        "size": param_size,
    }
  237. def sum_param_stats(params, bar_length_max=20):
  238. max_size = max([d["size"] for d in params] + [0])
  239. total_param_dims, total_param_size = 0, 0
  240. for d in params:
  241. total_param_dims += int(d["param_dim"])
  242. total_param_size += int(d["size"])
  243. d["size_cum"] = sizeof_fmt(total_param_size)
  244. for d in params:
  245. ratio = d["size"] / total_param_size
  246. d["ratio"] = ratio
  247. d["percentage"] = "{:.2f}%".format(ratio * 100)
  248. bar_length = int(d["size"] / max_size * bar_length_max)
  249. d["size_bar"] = "#" * bar_length
  250. d["size"] = sizeof_fmt(d["size"])
  251. param_size = sizeof_fmt(total_param_size)
  252. params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
  253. return total_param_dims, total_param_size, params
  254. def print_param_stats(params):
  255. header = [
  256. "name",
  257. "dtype",
  258. "shape",
  259. "mean",
  260. "std",
  261. "param_dim",
  262. "nbits",
  263. "size",
  264. "size_cum",
  265. "percentage",
  266. "size_bar",
  267. ]
  268. logger.info(
  269. "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
  270. )
  271. def get_activation_stats(output: np.ndarray, has_input=False):
  272. out_shape = output.shape
  273. activations_dtype = np.dtype(output.dtype)
  274. nbits = get_dtype_bit(activations_dtype.name)
  275. act_dim = np.prod(out_shape)
  276. act_size = act_dim * nbits // 8
  277. activation_stats = {
  278. "dtype": activations_dtype,
  279. "shape": out_shape,
  280. "act_dim": act_dim,
  281. "nbits": nbits,
  282. "size": act_size,
  283. }
  284. if has_input:
  285. activation_stats["mean"] = "{:.3g}".format(output.mean())
  286. activation_stats["std"] = "{:.3g}".format(output.std())
  287. return activation_stats
  288. def sum_activations_stats(activations, bar_length_max=20):
  289. max_act_size = max([i["size"] for i in activations] + [0])
  290. total_act_dims, total_act_size = 0, 0
  291. for d in activations:
  292. total_act_size += int(d["size"])
  293. total_act_dims += int(d["act_dim"])
  294. d["size_cum"] = sizeof_fmt(total_act_size)
  295. for d in activations:
  296. ratio = d["ratio"] = d["size"] / total_act_size
  297. d["percentage"] = "{:.2f}%".format(ratio * 100)
  298. bar_length = int(d["size"] / max_act_size * bar_length_max)
  299. d["size_bar"] = "#" * bar_length
  300. d["size"] = sizeof_fmt(d["size"])
  301. act_size = sizeof_fmt(total_act_size)
  302. activations.append(dict(name="total", act_dim=total_act_dims, size=act_size,))
  303. return total_act_dims, total_act_size, activations
  304. def print_activations_stats(activations, has_input=False):
  305. header = [
  306. "name",
  307. "class_name",
  308. "dtype",
  309. "shape",
  310. "nbits",
  311. "act_dim",
  312. "size",
  313. "size_cum",
  314. "percentage",
  315. "size_bar",
  316. ]
  317. if has_input:
  318. header.insert(4, "mean")
  319. header.insert(5, "std")
  320. logger.info(
  321. "activations stats: \n"
  322. + tabulate.tabulate(dict2table(activations, header=header))
  323. )
  324. def print_summary(**kwargs):
  325. data = [["item", "value"]]
  326. data.extend(list(kwargs.items()))
  327. logger.info("summary\n" + tabulate.tabulate(data))
def module_stats(
    model: m.Module,
    inputs: Iterable[np.ndarray] = None,
    input_shapes: list = None,
    cal_params: bool = True,
    cal_flops: bool = True,
    cal_activations: bool = True,
    logging_to_stdout: bool = True,
    bar_length_max: int = 20,
):
    r"""
    Calculate and print ``model``'s statistics by adding hook and record Module's inputs outputs size.

    :param model: model that need to get stats info.
    :param inputs: user defined input data for running model and calculating stats, alternative with input_shapes.
    :param input_shapes: shapes to generate random inputs for running model and calculating stats, alternative with inputs.
    :param cal_params: whether calculate and record params size.
    :param cal_flops: whether calculate and record op flops.
    :param cal_activations: whether calculate and record op activations.
    :param logging_to_stdout: whether print all calculated statistic details.
    :param bar_length_max: size of bar indicating max flops or parameter size in net stats.
    :return: ``(total_stats, stats_details)`` namedtuples, or ``None`` when
        neither ``inputs`` nor ``input_shapes`` was given.
    """
    # Only real user data yields meaningful activation mean/std; zero-filled
    # dummies (input_shapes path) skip those columns.
    has_inputs = False
    if inputs is not None:
        has_inputs = True
        if not isinstance(inputs, (tuple, list)):
            inputs = [inputs]
        inputs = [Tensor(input, dtype=np.float32) for input in inputs]
    else:
        if input_shapes:
            # A single shape tuple is promoted to a one-element list.
            if not isinstance(input_shapes[0], tuple):
                input_shapes = [input_shapes]
            inputs = [zeros(in_size, dtype=np.float32) for in_size in input_shapes]
        else:
            logger.error(
                "Inputs or input_shapes is required for running model and calculating stats.",
                exc_info=True,
            )
            return
    if not cal_activations:
        # NOTE(review): this flag is never read afterwards — looks vestigial.
        log_activations = False
    disable_receptive_field()

    def module_stats_hook(module, inputs, outputs, name=""):
        # Forward hook: appends into the enclosing params/flops/activations lists.
        class_name = str(module.__class__).split(".")[-1].split("'")[0]
        if cal_flops:
            flops_stats = get_op_stats(module, inputs, outputs)
            if flops_stats is not None:
                # None means no registered stats implementation for this module.
                flops_stats["name"] = name
                flops_stats["class_name"] = class_name
                flops.append(flops_stats)
        if cal_params:
            if hasattr(module, "weight") and module.weight is not None:
                w = module.weight
                param_stats = get_param_stats(w.numpy())
                param_stats["name"] = name + "-w"
                params.append(param_stats)
            if hasattr(module, "bias") and module.bias is not None:
                b = module.bias
                param_stats = get_param_stats(b.numpy())
                param_stats["name"] = name + "-b"
                params.append(param_stats)
        if cal_activations:
            if not isinstance(outputs, (tuple, list)):
                output = outputs.numpy()
            else:
                # only the first output of a multi-output module is measured
                output = outputs[0].numpy()
            activation_stats = get_activation_stats(output, has_inputs)
            activation_stats["name"] = name
            activation_stats["class_name"] = class_name
            activations.append(activation_stats)

    params = []
    flops = []
    hooks = []
    activations = []
    total_stats = namedtuple("total_stats", ["param_size", "flops", "act_size"])
    stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
    # Hook every module class we know how to measure (see hook_modules).
    for (name, module) in model.named_modules():
        if isinstance(module, hook_modules):
            hooks.append(
                module.register_forward_hook(partial(module_stats_hook, name=name))
            )
    # One forward pass in eval mode triggers all hooks; mode is restored after.
    with set_module_mode_safe(model, training=False) as model:
        model(*inputs)
    for h in hooks:
        h.remove()
    extra_info = {
        "#params": len(params),
    }
    (
        total_flops,
        total_param_dims,
        total_param_size,
        total_act_dims,
        total_act_size,
    ) = (0, 0, 0, 0, 0)
    if cal_params:
        total_param_dims, total_param_size, params = sum_param_stats(
            params, bar_length_max
        )
        extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
        extra_info["total_param_size"] = sizeof_fmt(total_param_size)
        if logging_to_stdout:
            print_param_stats(params)
    if cal_flops:
        total_flops, flops = sum_op_stats(flops, bar_length_max)
        extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
        if logging_to_stdout:
            print_op_stats(flops)
    if cal_activations:
        total_act_dims, total_act_size, activations = sum_activations_stats(
            activations, bar_length_max
        )
        extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
        extra_info["total_act_size"] = sizeof_fmt(total_act_size)
        if logging_to_stdout:
            print_activations_stats(activations, has_inputs)
    if cal_flops and cal_params:
        extra_info["flops/param_size"] = "{:3.3f}".format(
            total_flops / total_param_size
        )
    print_summary(**extra_info)
    return (
        total_stats(
            param_size=total_param_size, flops=total_flops, act_size=total_act_size,
        ),
        stats_details(params=params, flops=flops, activations=activations),
    )

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台