You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

module_stats.py 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. import collections
  9. from collections import namedtuple
  10. from functools import partial
  11. from typing import Iterable
  12. import numpy as np
  13. import tabulate
  14. import megengine as mge
  15. import megengine.module as m
  16. import megengine.module.qat as qatm
  17. import megengine.module.quantized as qm
  18. from megengine import Tensor
  19. from megengine import functional as F
  20. from megengine.core.tensor.dtype import get_dtype_bit
  21. from megengine.functional.tensor import zeros
  22. from megengine.tensor import Tensor
  23. from .module_utils import set_module_mode_safe
# Raise the log formatter's line limit so long stats tables are not truncated.
try:
    mge.logger.MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
    raise ValueError("set logger max lines failed")

logger = mge.get_logger(__name__)
logger.setLevel("INFO")

# Registries mapping module types to metric implementations; populated by the
# register_flops / register_receptive_field decorators below.
_calc_flops_dict = {}
_calc_receptive_field_dict = {}
  32. def _receptive_field_fallback(module, inputs, outputs):
  33. if not _receptive_field_enabled:
  34. return
  35. assert not hasattr(module, "_rf")
  36. assert not hasattr(module, "_stride")
  37. if len(inputs) == 0:
  38. # TODO: support other dimension
  39. module._rf = (1, 1)
  40. module._stride = (1, 1)
  41. return module._rf, module._stride
  42. rf, stride = preprocess_receptive_field(module, inputs, outputs)
  43. module._rf = rf
  44. module._stride = stride
  45. return rf, stride
# key tuple, impl_dict, fallback
# Each entry drives one metric in get_op_stats: the key(s) written into the
# result dict, the type -> implementation registry, and an optional fallback
# used when no registered type matches.
_iter_list = [
    ("flops_num", _calc_flops_dict, None),
    (
        ("receptive_field", "stride"),
        _calc_receptive_field_dict,
        _receptive_field_fallback,
    ),
]

# Toggled by enable_receptive_field() / disable_receptive_field() below.
_receptive_field_enabled = False
  56. def _register_dict(*modules, dict=None):
  57. def callback(impl):
  58. for module in modules:
  59. dict[module] = impl
  60. return impl
  61. return callback
  62. def register_flops(*modules):
  63. return _register_dict(*modules, dict=_calc_flops_dict)
  64. def register_receptive_field(*modules):
  65. return _register_dict(*modules, dict=_calc_receptive_field_dict)
  66. def enable_receptive_field():
  67. global _receptive_field_enabled
  68. _receptive_field_enabled = True
  69. def disable_receptive_field():
  70. global _receptive_field_enabled
  71. _receptive_field_enabled = False
  72. @register_flops(
  73. m.Conv1d, m.Conv2d, m.Conv3d, m.ConvTranspose2d, m.LocalConv2d, m.DeformableConv2d
  74. )
  75. def flops_convNd(module: m.Conv2d, inputs, outputs):
  76. bias = 1 if module.bias is not None else 0
  77. # N x Cout x H x W x (Cin x Kw x Kh + bias)
  78. return np.prod(outputs[0].shape) * (
  79. module.in_channels // module.groups * np.prod(module.kernel_size) + bias
  80. )
  81. @register_flops(
  82. m.batchnorm._BatchNorm, m.SyncBatchNorm, m.GroupNorm, m.LayerNorm, m.InstanceNorm,
  83. )
  84. def flops_norm(module: m.Linear, inputs, outputs):
  85. return np.prod(inputs[0].shape) * 7
  86. @register_flops(m.AvgPool2d, m.MaxPool2d)
  87. def flops_pool(module: m.AvgPool2d, inputs, outputs):
  88. kernel_sum = 0
  89. if isinstance(module.kernel_size, tuple) and len(module.kernel_size) == 2:
  90. kernel_sum = np.prod(module.kernel_size)
  91. else:
  92. kernel_sum = module.kernel_size ** 2
  93. return np.prod(outputs[0].shape) * kernel_sum
  94. @register_flops(m.AdaptiveAvgPool2d, m.AdaptiveMaxPool2d)
  95. def flops_adaptivePool(module: m.AdaptiveAvgPool2d, inputs, outputs):
  96. stride_h = np.floor(inputs[0].shape[2] / (inputs[0].shape[2] - 1))
  97. kernel_h = inputs[0].shape[2] - (inputs[0].shape[2] - 1) * stride_h
  98. stride_w = np.floor(inputs[0].shape[3] / (inputs[0].shape[3] - 1))
  99. kernel_w = inputs[0].shape[3] - (inputs[0].shape[3] - 1) * stride_w
  100. return np.prod(outputs[0].shape) * kernel_h * kernel_w
  101. @register_flops(m.Linear)
  102. def flops_linear(module: m.Linear, inputs, outputs):
  103. bias = module.out_features if module.bias is not None else 0
  104. return np.prod(outputs[0].shape) * module.in_features + bias
  105. @register_flops(m.BatchMatMulActivation)
  106. def flops_batchmatmul(module: m.BatchMatMulActivation, inputs, outputs):
  107. bias = 1 if module.bias is not None else 0
  108. x = inputs[0]
  109. w = module.weight
  110. batch_size = x.shape[0]
  111. n, p = x.shape[1:]
  112. _, m = w.shape[1:]
  113. return n * (p + bias) * m * batch_size
# does not need import qat and quantized module since they inherit from float module.
# Base classes whose instances get a forward hook attached in module_stats().
hook_modules = (
    m.conv._ConvNd,
    m.Linear,
    m.BatchMatMulActivation,
    m.batchnorm._BatchNorm,
    m.LayerNorm,
    m.GroupNorm,
    m.InstanceNorm,
    m.pooling._PoolNd,
    m.adaptive_pooling._AdaptivePoolNd,
)
  126. def _mean(inp):
  127. inp = mge.tensor(inp).astype(np.float32)
  128. return F.mean(inp).numpy()
  129. def _std(inp):
  130. inp = mge.tensor(inp).astype(np.float32)
  131. return F.std(inp).numpy()
  132. def dict2table(list_of_dict, header):
  133. table_data = [header]
  134. for d in list_of_dict:
  135. row = []
  136. for h in header:
  137. v = ""
  138. if h in d:
  139. v = d[h]
  140. row.append(v)
  141. table_data.append(row)
  142. return table_data
  143. def sizeof_fmt(num, suffix="B"):
  144. if suffix == "B":
  145. scale = 1024.0
  146. units = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"]
  147. else:
  148. scale = 1000.0
  149. units = ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]
  150. for unit in units:
  151. if abs(num) < scale or unit == units[-1]:
  152. return "{:3.3f} {}{}".format(num, unit, suffix)
  153. num /= scale
  154. def preprocess_receptive_field(module, inputs, outputs):
  155. # TODO: support other dimensions
  156. pre_rf = (
  157. max(getattr(i.owner, "_rf", (1, 1))[0] for i in inputs),
  158. max(getattr(i.owner, "_rf", (1, 1))[1] for i in inputs),
  159. )
  160. pre_stride = (
  161. max(getattr(i.owner, "_stride", (1, 1))[0] for i in inputs),
  162. max(getattr(i.owner, "_stride", (1, 1))[1] for i in inputs),
  163. )
  164. return pre_rf, pre_stride
  165. def get_op_stats(module, inputs, outputs):
  166. if not isinstance(outputs, tuple) and not isinstance(outputs, list):
  167. outputs = (outputs,)
  168. rst = {
  169. "input_shapes": [i.shape for i in inputs],
  170. "output_shapes": [o.shape for o in outputs],
  171. }
  172. valid_flag = False
  173. for key, _dict, fallback in _iter_list:
  174. for _type in _dict:
  175. if isinstance(module, _type):
  176. value = _dict[_type](module, inputs, outputs)
  177. valid_flag = True
  178. break
  179. else:
  180. if fallback is not None:
  181. value = fallback(module, inputs, outputs)
  182. continue
  183. if isinstance(key, tuple):
  184. assert isinstance(value, tuple)
  185. for k, v in zip(key, value):
  186. rst[k] = v
  187. else:
  188. rst[key] = value
  189. if valid_flag:
  190. return rst
  191. else:
  192. return None
  193. return
  194. def sum_op_stats(flops, bar_length_max=20):
  195. max_flops_num = max([i["flops_num"] for i in flops] + [0])
  196. total_flops_num = 0
  197. for d in flops:
  198. total_flops_num += int(d["flops_num"])
  199. d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
  200. for d in flops:
  201. ratio = d["ratio"] = d["flops_num"] / total_flops_num
  202. d["percentage"] = "{:.2f}%".format(ratio * 100)
  203. bar_length = int(d["flops_num"] / max_flops_num * bar_length_max)
  204. d["bar"] = "#" * bar_length
  205. d["flops"] = sizeof_fmt(d["flops_num"], suffix="OPs")
  206. total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
  207. total_var_size = sum(
  208. sum(s[1] if len(s) > 1 else 0 for s in d["output_shapes"]) for d in flops
  209. )
  210. flops.append(
  211. dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
  212. )
  213. return total_flops_num, flops
  214. def print_op_stats(flops):
  215. header = [
  216. "name",
  217. "class_name",
  218. "input_shapes",
  219. "output_shapes",
  220. "flops",
  221. "flops_cum",
  222. "percentage",
  223. "bar",
  224. ]
  225. if _receptive_field_enabled:
  226. header.insert(4, "receptive_field")
  227. header.insert(5, "stride")
  228. logger.info("flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header)))
  229. def get_param_stats(param: Tensor):
  230. nbits = get_dtype_bit(np.dtype(param.dtype).name)
  231. shape = param.shape
  232. param_dim = np.prod(param.shape)
  233. param_size = param_dim * nbits // 8
  234. return {
  235. "dtype": np.dtype(param.dtype),
  236. "shape": shape,
  237. "mean": "{:.3g}".format(_mean(param)),
  238. "std": "{:.3g}".format(_std(param)),
  239. "param_dim": param_dim,
  240. "nbits": nbits,
  241. "size": param_size,
  242. }
  243. def sum_param_stats(params, bar_length_max=20):
  244. max_size = max([d["size"] for d in params] + [0])
  245. total_param_dims, total_param_size = 0, 0
  246. for d in params:
  247. total_param_dims += int(d["param_dim"])
  248. total_param_size += int(d["size"])
  249. d["size_cum"] = sizeof_fmt(total_param_size)
  250. for d in params:
  251. ratio = d["size"] / total_param_size
  252. d["ratio"] = ratio
  253. d["percentage"] = "{:.2f}%".format(ratio * 100)
  254. bar_length = int(d["size"] / max_size * bar_length_max)
  255. d["size_bar"] = "#" * bar_length
  256. d["size"] = sizeof_fmt(d["size"])
  257. param_size = sizeof_fmt(total_param_size)
  258. params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
  259. return total_param_dims, total_param_size, params
  260. def print_param_stats(params):
  261. header = [
  262. "name",
  263. "dtype",
  264. "shape",
  265. "mean",
  266. "std",
  267. "param_dim",
  268. "nbits",
  269. "size",
  270. "size_cum",
  271. "percentage",
  272. "size_bar",
  273. ]
  274. logger.info(
  275. "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
  276. )
  277. def get_activation_stats(output: Tensor, has_input=False):
  278. out_shape = output.shape
  279. activations_dtype = np.dtype(output.dtype)
  280. nbits = get_dtype_bit(activations_dtype.name)
  281. act_dim = np.prod(out_shape)
  282. act_size = act_dim * nbits // 8
  283. activation_stats = {
  284. "dtype": activations_dtype,
  285. "shape": out_shape,
  286. "act_dim": act_dim,
  287. "nbits": nbits,
  288. "size": act_size,
  289. }
  290. if has_input:
  291. activation_stats["mean"] = "{:.3g}".format(_mean(output))
  292. activation_stats["std"] = "{:.3g}".format(_std(output))
  293. return activation_stats
  294. def sum_activations_stats(activations, bar_length_max=20):
  295. max_act_size = max([i["size"] for i in activations] + [0])
  296. total_act_dims, total_act_size = 0, 0
  297. for d in activations:
  298. total_act_size += int(d["size"])
  299. total_act_dims += int(d["act_dim"])
  300. d["size_cum"] = sizeof_fmt(total_act_size)
  301. for d in activations:
  302. ratio = d["ratio"] = d["size"] / total_act_size
  303. d["percentage"] = "{:.2f}%".format(ratio * 100)
  304. bar_length = int(d["size"] / max_act_size * bar_length_max)
  305. d["size_bar"] = "#" * bar_length
  306. d["size"] = sizeof_fmt(d["size"])
  307. act_size = sizeof_fmt(total_act_size)
  308. activations.append(dict(name="total", act_dim=total_act_dims, size=act_size,))
  309. return total_act_dims, total_act_size, activations
  310. def print_activations_stats(activations, has_input=False):
  311. header = [
  312. "name",
  313. "class_name",
  314. "dtype",
  315. "shape",
  316. "nbits",
  317. "act_dim",
  318. "size",
  319. "size_cum",
  320. "percentage",
  321. "size_bar",
  322. ]
  323. if has_input:
  324. header.insert(4, "mean")
  325. header.insert(5, "std")
  326. logger.info(
  327. "activations stats: \n"
  328. + tabulate.tabulate(dict2table(activations, header=header))
  329. )
  330. def print_summary(**kwargs):
  331. data = [["item", "value"]]
  332. data.extend(list(kwargs.items()))
  333. logger.info("summary\n" + tabulate.tabulate(data))
  334. def module_stats(
  335. model: m.Module,
  336. inputs: Iterable[np.ndarray] = None,
  337. input_shapes: list = None,
  338. cal_params: bool = True,
  339. cal_flops: bool = True,
  340. cal_activations: bool = True,
  341. logging_to_stdout: bool = True,
  342. bar_length_max: int = 20,
  343. ):
  344. r"""Calculate and print ``model``'s statistics by adding hook and record Module's inputs outputs size.
  345. Args:
  346. model: model that need to get stats info.
  347. inputs: user defined input data for running model and calculating stats, alternative with input_shapes.
  348. input_shapes: shapes to generate random inputs for running model and calculating stats, alternative with inputs.
  349. cal_params: whether calculate and record params size.
  350. cal_flops: whether calculate and record op flops.
  351. cal_activations: whether calculate and record op activations.
  352. logging_to_stdout: whether print all calculated statistic details.
  353. bar_length_max: size of bar indicating max flops or parameter size in net stats.
  354. """
  355. has_inputs = False
  356. if inputs is not None:
  357. has_inputs = True
  358. if not isinstance(inputs, (tuple, list)):
  359. inputs = [inputs]
  360. def load_tensor(x):
  361. if isinstance(x, np.ndarray):
  362. return Tensor(x)
  363. elif isinstance(x, collections.abc.Mapping):
  364. return {k: load_tensor(x) for k, v in x.items()}
  365. elif isinstance(x, tuple) and hasattr(x, "_fields"): # nametuple
  366. return type(x)(*(load_tensor(value) for value in x))
  367. elif isinstance(x, collections.abc.Sequence):
  368. return [load_tensor(v) for v in x]
  369. else:
  370. return Tensor(x, dtype=np.float32)
  371. inputs = load_tensor(inputs)
  372. else:
  373. if input_shapes:
  374. if not isinstance(input_shapes[0], tuple):
  375. input_shapes = [input_shapes]
  376. inputs = [zeros(in_size, dtype=np.float32) for in_size in input_shapes]
  377. else:
  378. logger.error(
  379. "Inputs or input_shapes is required for running model and calculating stats.",
  380. exc_info=True,
  381. )
  382. return
  383. if not cal_activations:
  384. log_activations = False
  385. disable_receptive_field()
  386. def module_stats_hook(module, inputs, outputs, name=""):
  387. class_name = str(module.__class__).split(".")[-1].split("'")[0]
  388. if cal_flops:
  389. flops_stats = get_op_stats(module, inputs, outputs)
  390. if flops_stats is not None:
  391. flops_stats["name"] = name
  392. flops_stats["class_name"] = class_name
  393. flops.append(flops_stats)
  394. if cal_params:
  395. if hasattr(module, "weight") and module.weight is not None:
  396. w = module.weight
  397. param_stats = get_param_stats(w)
  398. param_stats["name"] = name + "-w"
  399. params.append(param_stats)
  400. if hasattr(module, "bias") and module.bias is not None:
  401. b = module.bias
  402. param_stats = get_param_stats(b)
  403. param_stats["name"] = name + "-b"
  404. params.append(param_stats)
  405. if cal_activations:
  406. if not isinstance(outputs, (tuple, list)):
  407. output = outputs
  408. else:
  409. output = outputs[0]
  410. activation_stats = get_activation_stats(output, has_inputs)
  411. activation_stats["name"] = name
  412. activation_stats["class_name"] = class_name
  413. activations.append(activation_stats)
  414. params = []
  415. flops = []
  416. hooks = []
  417. activations = []
  418. total_stats = namedtuple(
  419. "total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
  420. )
  421. stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
  422. for (name, module) in model.named_modules():
  423. if isinstance(module, hook_modules):
  424. hooks.append(
  425. module.register_forward_hook(partial(module_stats_hook, name=name))
  426. )
  427. with set_module_mode_safe(model, training=False) as model:
  428. model(*inputs)
  429. for h in hooks:
  430. h.remove()
  431. extra_info = {
  432. "#params": len(params),
  433. }
  434. (
  435. total_flops,
  436. total_param_dims,
  437. total_param_size,
  438. total_act_dims,
  439. total_act_size,
  440. ) = (0, 0, 0, 0, 0)
  441. if cal_params:
  442. total_param_dims, total_param_size, params = sum_param_stats(
  443. params, bar_length_max
  444. )
  445. extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
  446. extra_info["total_param_size"] = sizeof_fmt(total_param_size)
  447. if logging_to_stdout:
  448. print_param_stats(params)
  449. if cal_flops:
  450. total_flops, flops = sum_op_stats(flops, bar_length_max)
  451. extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
  452. if logging_to_stdout:
  453. print_op_stats(flops)
  454. if cal_activations:
  455. total_act_dims, total_act_size, activations = sum_activations_stats(
  456. activations, bar_length_max
  457. )
  458. extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
  459. extra_info["total_act_size"] = sizeof_fmt(total_act_size)
  460. if logging_to_stdout:
  461. print_activations_stats(activations, has_inputs)
  462. if cal_flops and cal_params and total_param_size != 0:
  463. extra_info["flops/param_size"] = "{:3.3f}".format(
  464. total_flops / total_param_size
  465. )
  466. print_summary(**extra_info)
  467. return (
  468. total_stats(
  469. param_size=total_param_size,
  470. param_dims=total_param_dims,
  471. flops=total_flops,
  472. act_size=total_act_size,
  473. act_dims=total_act_dims,
  474. ),
  475. stats_details(params=params, flops=flops, activations=activations),
  476. )