
module_stats.py

import collections
import functools
from collections import namedtuple
from contextlib import contextmanager
from functools import partial
from typing import Iterable

import numpy as np
import tabulate

from .. import Tensor
from .. import functional as F
from .. import get_logger
from .. import module as M
from ..core.tensor.dtype import get_dtype_bit
from ..logger import MegEngineLogFormatter
from .module_utils import set_module_mode_safe

try:
    MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
    raise ValueError("set logger max lines failed") from e

logger = get_logger(__name__)
logger.setLevel("INFO")

_calc_flops_dict = {}
_calc_receptive_field_dict = {}


def _receptive_field_fallback(module, inputs, outputs):
    if not _receptive_field_enabled:
        return
    assert not hasattr(module, "_rf")
    assert not hasattr(module, "_stride")
    if len(inputs) == 0:
        # TODO: support other dimension
        module._rf = (1, 1)
        module._stride = (1, 1)
        return module._rf, module._stride
    rf, stride = preprocess_receptive_field(module, inputs, outputs)
    module._rf = rf
    module._stride = stride
    return rf, stride

# key (or key tuple), impl_dict, fallback
_iter_list = [
    ("flops_num", _calc_flops_dict, None),
    (
        ("receptive_field", "stride"),
        _calc_receptive_field_dict,
        _receptive_field_fallback,
    ),
]

_receptive_field_enabled = False

def _register_dict(*modules, dict=None):
    def callback(impl):
        for module in modules:
            dict[module] = impl
        return impl

    return callback


def register_flops(*modules):
    return _register_dict(*modules, dict=_calc_flops_dict)


def register_receptive_field(*modules):
    return _register_dict(*modules, dict=_calc_receptive_field_dict)


def enable_receptive_field():
    global _receptive_field_enabled
    _receptive_field_enabled = True


def disable_receptive_field():
    global _receptive_field_enabled
    _receptive_field_enabled = False

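# Illustrative example (not part of the original file): a custom FLOPs rule
# can be attached to any module class through the decorator above. `MyScale`
# is a hypothetical module used only to show the pattern.
#
#   class MyScale(M.Module):
#       def forward(self, x):
#           return x * 2.0
#
#   @register_flops(MyScale)
#   def flops_my_scale(module, inputs, outputs):
#       return np.prod(outputs[0].shape)  # one multiply per output element
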
@register_flops(M.Conv1d, M.Conv2d, M.Conv3d, M.LocalConv2d, M.DeformableConv2d)
def flops_convNd(module: M.Conv2d, inputs, outputs):
    bias = 1 if module.bias is not None else 0
    # N x Cout x H x W x (Cin / groups x Kh x Kw + bias)
    return np.prod(outputs[0].shape) * (
        float(module.in_channels // module.groups) * np.prod(module.kernel_size) + bias
    )

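# Sanity check of the formula above (illustrative numbers): for a
# Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1) with bias
# on a (1, 3, 32, 32) input, the output shape is (1, 8, 32, 32), so
#   flops = 1 * 8 * 32 * 32 * (3 * 3 * 3 + 1) = 8192 * 28 = 229376 OPs.
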
@register_flops(M.ConvTranspose2d)
def flops_convNdTranspose(module: M.ConvTranspose2d, inputs, outputs):
    bias = 1 if module.bias is not None else 0
    # N x Cin x H x W x (Cout / groups x Kh x Kw), plus bias per output element
    return (
        np.prod(inputs[0].shape)
        * (module.out_channels // module.groups * np.prod(module.kernel_size))
        + np.prod(outputs[0].shape) * bias
    )

@register_flops(
    M.batchnorm._BatchNorm, M.SyncBatchNorm, M.GroupNorm, M.LayerNorm, M.InstanceNorm,
)
def flops_norm(module: M.Module, inputs, outputs):
    return np.prod(inputs[0].shape) * 7

@register_flops(M.AvgPool2d, M.MaxPool2d)
def flops_pool(module: M.AvgPool2d, inputs, outputs):
    if isinstance(module.kernel_size, tuple) and len(module.kernel_size) == 2:
        kernel_sum = np.prod(module.kernel_size)
    else:
        kernel_sum = module.kernel_size ** 2
    return np.prod(outputs[0].shape) * kernel_sum

@register_flops(M.AdaptiveAvgPool2d, M.AdaptiveMaxPool2d)
def flops_adaptivePool(module: M.AdaptiveAvgPool2d, inputs, outputs):
    # Recover the effective stride and kernel from the input/output spatial
    # sizes: stride = floor(in / out), kernel = in - (out - 1) * stride.
    stride_h = np.floor(inputs[0].shape[2] / outputs[0].shape[2])
    kernel_h = inputs[0].shape[2] - (outputs[0].shape[2] - 1) * stride_h
    stride_w = np.floor(inputs[0].shape[3] / outputs[0].shape[3])
    kernel_w = inputs[0].shape[3] - (outputs[0].shape[3] - 1) * stride_w
    return np.prod(outputs[0].shape) * kernel_h * kernel_w

@register_flops(M.Linear)
def flops_linear(module: M.Linear, inputs, outputs):
    bias = module.out_features if module.bias is not None else 0
    return np.prod(outputs[0].shape) * module.in_features + bias

@register_flops(M.BatchMatMulActivation)
def flops_batchmatmul(module: M.BatchMatMulActivation, inputs, outputs):
    bias = 1 if module.bias is not None else 0
    x = inputs[0]
    w = module.weight
    batch_size = x.shape[0]
    n, p = x.shape[1:]
    _, m = w.shape[1:]
    return n * (p + bias) * m * batch_size

# No need to import the qat and quantized modules, since they inherit from
# the float modules listed here.
hook_modules = [
    M.conv._ConvNd,
    M.Linear,
    M.BatchMatMulActivation,
    M.batchnorm._BatchNorm,
    M.LayerNorm,
    M.GroupNorm,
    M.InstanceNorm,
    M.pooling._PoolNd,
    M.adaptive_pooling._AdaptivePoolNd,
]

def register_hook_module(module):
    if isinstance(module, (tuple, list)):
        modules = module
        for module in modules:
            register_hook_module(module)
    elif issubclass(module, M.Module):
        hook_modules.append(module)
    else:
        raise TypeError(
            "module should be a subclass of M.Module, or a list/tuple of such subclasses"
        )

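# Illustrative example: hooking a user-defined module class so that
# module_stats records it. `MyBlock` is hypothetical; any M.Module subclass
# works, as does a list or tuple of such subclasses.
#
#   class MyBlock(M.Module):
#       ...
#
#   register_hook_module(MyBlock)
#   register_hook_module([MyBlock, M.Dropout])
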
def _mean(inp):
    inp = Tensor(inp).astype(np.float32)
    return F.mean(inp).numpy()


def _std(inp):
    inp = Tensor(inp).astype(np.float32)
    return F.std(inp).numpy()

def dict2table(list_of_dict, header):
    table_data = [header]
    for d in list_of_dict:
        row = []
        for h in header:
            v = ""
            if h in d:
                v = d[h]
            row.append(v)
        table_data.append(row)
    return table_data

def sizeof_fmt(num, suffix="B"):
    if suffix == "B":
        scale = 1024.0
        units = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"]
    else:
        scale = 1000.0
        units = ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]
    for unit in units:
        if abs(num) < scale or unit == units[-1]:
            return "{:3.3f} {}{}".format(num, unit, suffix)
        num /= scale

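# Illustrative outputs (binary scaling for the default "B" suffix, decimal
# scaling for everything else):
#   sizeof_fmt(4096)                 -> "4.000 KiB"
#   sizeof_fmt(1.5e9, suffix="OPs")  -> "1.500 GOPs"
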
def preprocess_receptive_field(module, inputs, outputs):
    # TODO: support other dimensions
    pre_rf = (
        max(getattr(i.owner, "_rf", (1, 1))[0] for i in inputs),
        max(getattr(i.owner, "_rf", (1, 1))[1] for i in inputs),
    )
    pre_stride = (
        max(getattr(i.owner, "_stride", (1, 1))[0] for i in inputs),
        max(getattr(i.owner, "_stride", (1, 1))[1] for i in inputs),
    )
    return pre_rf, pre_stride

def get_op_stats(module, inputs, outputs):
    if not isinstance(outputs, (tuple, list)):
        outputs = (outputs,)
    rst = {
        "input_shapes": [i.shape for i in inputs],
        "output_shapes": [o.shape for o in outputs],
    }
    valid_flag = False
    for key, _dict, fallback in _iter_list:
        # The inner for-else runs the fallback only when no registered type
        # matched; `continue` then skips recording a value for this key.
        for _type in _dict:
            if isinstance(module, _type):
                value = _dict[_type](module, inputs, outputs)
                valid_flag = True
                break
        else:
            if fallback is not None:
                value = fallback(module, inputs, outputs)
            continue

        if isinstance(key, tuple):
            assert isinstance(value, tuple)
            for k, v in zip(key, value):
                rst[k] = v
        else:
            rst[key] = value

    if valid_flag:
        return rst
    else:
        return None

def sum_op_stats(flops, bar_length_max=20):
    max_flops_num = max([i["flops_num"] for i in flops] + [0])
    total_flops_num = 0
    for d in flops:
        total_flops_num += int(d["flops_num"])
        d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
    for d in flops:
        ratio = d["ratio"] = d["flops_num"] / total_flops_num
        d["percentage"] = "{:.2f}%".format(ratio * 100)
        bar_length = int(d["flops_num"] / max_flops_num * bar_length_max)
        d["bar"] = "#" * bar_length
        d["flops"] = sizeof_fmt(d["flops_num"], suffix="OPs")
    total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
    total_var_size = sum(
        sum(s[1] if len(s) > 1 else 0 for s in d["output_shapes"]) for d in flops
    )
    flops.append(
        dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
    )
    return total_flops_num, flops

def print_op_stats(flops):
    header = [
        "name",
        "class_name",
        "input_shapes",
        "output_shapes",
        "flops",
        "flops_cum",
        "percentage",
        "bar",
    ]
    if _receptive_field_enabled:
        header.insert(4, "receptive_field")
        header.insert(5, "stride")
    logger.info("flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header)))

def get_param_stats(param: Tensor):
    nbits = get_dtype_bit(np.dtype(param.dtype).name)
    shape = param.shape
    param_dim = np.prod(param.shape)
    param_size = param_dim * nbits // 8
    return {
        "dtype": np.dtype(param.dtype),
        "shape": shape,
        "mean": "{:.3g}".format(_mean(param)),
        "std": "{:.3g}".format(_std(param)),
        "param_dim": param_dim,
        "nbits": nbits,
        "size": param_size,
    }

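# Illustrative result, assuming a float32 parameter of shape (64, 3, 3, 3):
#   get_param_stats(w) -> {
#       "dtype": dtype('float32'), "shape": (64, 3, 3, 3),
#       "mean": "...", "std": "...",
#       "param_dim": 1728, "nbits": 32, "size": 6912,  # bytes
#   }
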
def sum_param_stats(params, bar_length_max=20):
    max_size = max([d["size"] for d in params] + [0])
    total_param_dims, total_param_size = 0, 0
    for d in params:
        total_param_dims += int(d["param_dim"])
        total_param_size += int(d["size"])
        d["size_cum"] = sizeof_fmt(total_param_size)
    for d in params:
        ratio = d["size"] / total_param_size
        d["ratio"] = ratio
        d["percentage"] = "{:.2f}%".format(ratio * 100)
        bar_length = int(d["size"] / max_size * bar_length_max)
        d["size_bar"] = "#" * bar_length
        d["size"] = sizeof_fmt(d["size"])
    param_size = sizeof_fmt(total_param_size)
    params.append(dict(name="total", param_dim=total_param_dims, size=param_size))
    return total_param_dims, total_param_size, params

def print_param_stats(params):
    header = [
        "name",
        "dtype",
        "shape",
        "mean",
        "std",
        "param_dim",
        "nbits",
        "size",
        "size_cum",
        "percentage",
        "size_bar",
    ]
    logger.info(
        "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
    )

def get_activation_stats(output: Tensor, has_input=False):
    out_shape = output.shape
    activations_dtype = np.dtype(output.dtype)
    nbits = get_dtype_bit(activations_dtype.name)
    act_dim = np.prod(out_shape)
    act_size = act_dim * nbits // 8
    activation_stats = {
        "dtype": activations_dtype,
        "shape": out_shape,
        "act_dim": act_dim,
        "nbits": nbits,
        "size": act_size,
    }
    if has_input:
        activation_stats["mean"] = "{:.3g}".format(_mean(output))
        activation_stats["std"] = "{:.3g}".format(_std(output))
    return activation_stats

def sum_activations_stats(activations, bar_length_max=20):
    max_act_size = max([i["size"] for i in activations] + [0])
    total_act_dims, total_act_size = 0, 0
    for d in activations:
        total_act_size += int(d["size"])
        total_act_dims += int(d["act_dim"])
        d["size_cum"] = sizeof_fmt(total_act_size)
    for d in activations:
        ratio = d["ratio"] = d["size"] / total_act_size
        d["percentage"] = "{:.2f}%".format(ratio * 100)
        bar_length = int(d["size"] / max_act_size * bar_length_max)
        d["size_bar"] = "#" * bar_length
        d["size"] = sizeof_fmt(d["size"])
    act_size = sizeof_fmt(total_act_size)
    activations.append(dict(name="total", act_dim=total_act_dims, size=act_size))
    return total_act_dims, total_act_size, activations

def print_activations_stats(activations, has_input=False):
    header = [
        "name",
        "class_name",
        "dtype",
        "shape",
        "nbits",
        "act_dim",
        "size",
        "size_cum",
        "percentage",
        "size_bar",
    ]
    if has_input:
        header.insert(4, "mean")
        header.insert(5, "std")
    logger.info(
        "activations stats: \n"
        + tabulate.tabulate(dict2table(activations, header=header))
    )

def print_summary(**kwargs):
    data = [["item", "value"]]
    data.extend(list(kwargs.items()))
    logger.info("summary\n" + tabulate.tabulate(data))

def module_stats(
    model: M.Module,
    inputs: Iterable[np.ndarray] = None,
    input_shapes: list = None,
    cal_params: bool = True,
    cal_flops: bool = True,
    cal_activations: bool = True,
    logging_to_stdout: bool = True,
    bar_length_max: int = 20,
):
    r"""Calculate and print ``model``'s statistics by registering forward hooks
    that record each module's input and output sizes.

    Args:
        model: model to collect stats for.
        inputs: user-defined input data used to run the model and calculate
            stats; mutually exclusive with ``input_shapes``.
        input_shapes: shapes used to generate random inputs for running the
            model and calculating stats; mutually exclusive with ``inputs``.
        cal_params: whether to calculate and record parameter sizes.
        cal_flops: whether to calculate and record op FLOPs.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: maximum length of the bar indicating the largest FLOPs
            or parameter size in the net stats.
    """
    has_inputs = False
    if inputs is not None:
        has_inputs = True
        if not isinstance(inputs, (tuple, list)):
            inputs = [inputs]

        def load_tensor(x):
            if isinstance(x, np.ndarray):
                return Tensor(x)
            elif isinstance(x, collections.abc.Mapping):
                return {k: load_tensor(v) for k, v in x.items()}
            elif isinstance(x, tuple) and hasattr(x, "_fields"):  # namedtuple
                return type(x)(*(load_tensor(value) for value in x))
            elif isinstance(x, collections.abc.Sequence):
                return [load_tensor(v) for v in x]
            else:
                return Tensor(x, dtype=np.float32)

        inputs = load_tensor(inputs)
    else:
        if input_shapes:
            if not isinstance(input_shapes[0], tuple):
                input_shapes = [input_shapes]
            inputs = [F.zeros(in_size, dtype=np.float32) for in_size in input_shapes]
        else:
            logger.error(
                "inputs or input_shapes is required for running the model and calculating stats.",
                exc_info=True,
            )
            return

    disable_receptive_field()

    recorded_parameters = set()

    def module_stats_hook(module, inputs, outputs, name=""):
        class_name = str(module.__class__).split(".")[-1].split("'")[0]
        if cal_flops:
            flops_stats = get_op_stats(module, inputs, outputs)
            if flops_stats is not None:
                flops_stats["name"] = name
                flops_stats["class_name"] = class_name
                flops.append(flops_stats)
        if cal_params:
            if (
                hasattr(module, "weight")
                and (module.weight is not None)
                and module.weight not in recorded_parameters
            ):
                w = module.weight
                param_stats = get_param_stats(w)
                param_stats["name"] = name + "-w"
                params.append(param_stats)
                recorded_parameters.add(w)
            if (
                hasattr(module, "bias")
                and module.bias is not None
                and module.bias not in recorded_parameters
            ):
                b = module.bias
                param_stats = get_param_stats(b)
                param_stats["name"] = name + "-b"
                params.append(param_stats)
                recorded_parameters.add(b)
        if cal_activations:
            if not isinstance(outputs, (tuple, list)):
                output = outputs
            else:
                output = outputs[0]
            activation_stats = get_activation_stats(output, has_inputs)
            activation_stats["name"] = name
            activation_stats["class_name"] = class_name
            activations.append(activation_stats)

    params = []
    flops = []
    hooks = []
    activations = []
    total_stats = namedtuple(
        "total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
    )
    stats_details = namedtuple("module_stats", ["params", "flops", "activations"])

    module_to_name = dict()
    for (name, module) in model.named_modules():
        if isinstance(module, tuple(hook_modules)):
            hooks.append(
                module.register_forward_hook(partial(module_stats_hook, name=name))
            )
        module_to_name[module] = name

    @contextmanager
    def param_stat_context():
        def wrapper(fun):
            @functools.wraps(fun)
            def param_access_record(module, item):
                member = fun(module, item)
                if (
                    item in ["weight", "bias"]
                    and member is not None
                    and member not in recorded_parameters
                ):
                    name = module_to_name[module]
                    if item == "weight":
                        suffix = "-w"
                    elif item == "bias":
                        suffix = "-b"
                    param_name = name + suffix
                    param_stats = get_param_stats(member)
                    param_stats["name"] = param_name
                    params.append(param_stats)
                    recorded_parameters.add(member)
                return member

            return param_access_record

        origin_get_attr = object.__getattribute__
        try:
            M.Module.__getattribute__ = wrapper(origin_get_attr)
            yield
        finally:
            M.Module.__getattribute__ = origin_get_attr

    with set_module_mode_safe(model, training=False) as model, param_stat_context():
        model(*inputs)

    for h in hooks:
        h.remove()

    extra_info = {
        "#params": len(params),
    }
    (
        total_flops,
        total_param_dims,
        total_param_size,
        total_act_dims,
        total_act_size,
    ) = (0, 0, 0, 0, 0)

    if cal_params:
        total_param_dims, total_param_size, params = sum_param_stats(
            params, bar_length_max
        )
        extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
        extra_info["total_param_size"] = sizeof_fmt(total_param_size)
        if logging_to_stdout:
            print_param_stats(params)

    if cal_flops:
        total_flops, flops = sum_op_stats(flops, bar_length_max)
        extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
        if logging_to_stdout:
            print_op_stats(flops)

    if cal_activations:
        total_act_dims, total_act_size, activations = sum_activations_stats(
            activations, bar_length_max
        )
        extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
        extra_info["total_act_size"] = sizeof_fmt(total_act_size)
        if logging_to_stdout:
            print_activations_stats(activations, has_inputs)

    if cal_flops and cal_params and total_param_size != 0:
        extra_info["flops/param_size"] = "{:3.3f}".format(
            total_flops / total_param_size
        )

    print_summary(**extra_info)

    return (
        total_stats(
            param_size=total_param_size,
            param_dims=total_param_dims,
            flops=total_flops,
            act_size=total_act_size,
            act_dims=total_act_dims,
        ),
        stats_details(params=params, flops=flops, activations=activations),
    )
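
# Example usage (illustrative sketch; the two-layer net is hypothetical and
# the import path assumes this file lives at megengine/utils/module_stats.py):
#
#   import megengine.module as M
#   from megengine.utils.module_stats import module_stats
#
#   class Net(M.Module):
#       def __init__(self):
#           super().__init__()
#           self.conv = M.Conv2d(3, 8, 3, padding=1)
#           self.fc = M.Linear(8 * 32 * 32, 10)
#
#       def forward(self, x):
#           x = self.conv(x)
#           return self.fc(x.reshape(x.shape[0], -1))
#
#   total_stats, stats_details = module_stats(
#       Net(), input_shapes=[(1, 3, 32, 32)], logging_to_stdout=True,
#   )
#   print(total_stats.flops, total_stats.param_size)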