You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

module_stats.py 8.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. from functools import partial
  9. import numpy as np
  10. import tabulate
  11. import megengine as mge
  12. import megengine.module as m
  13. import megengine.module.qat as qatm
  14. import megengine.module.quantized as qm
  15. from megengine.core.tensor.dtype import get_dtype_bit
  16. from megengine.functional.tensor import zeros
  17. try:
  18. mge.logger.MegEngineLogFormatter.max_lines = float("inf")
  19. except AttributeError as e:
  20. raise ValueError("set logger max lines failed")
  21. logger = mge.get_logger(__name__)
  22. logger.setLevel("INFO")
  23. CALC_FLOPS = {}
  24. def _register_modules(*modules):
  25. def callback(impl):
  26. for module in modules:
  27. CALC_FLOPS[module] = impl
  28. return impl
  29. return callback
  30. @_register_modules(
  31. m.Conv2d,
  32. m.ConvTranspose2d,
  33. m.LocalConv2d,
  34. qm.Conv2d,
  35. qm.ConvRelu2d,
  36. qm.ConvBn2d,
  37. qm.ConvBnRelu2d,
  38. qatm.Conv2d,
  39. qatm.ConvRelu2d,
  40. qatm.ConvBn2d,
  41. qatm.ConvBnRelu2d,
  42. )
  43. def count_convNd(module, input, output):
  44. bias = 1 if module.bias is not None else 0
  45. group = module.groups
  46. ic = input[0].shape[1]
  47. oc = output[0].shape[1]
  48. goc = oc // group
  49. gic = ic // group
  50. N = output[0].shape[0]
  51. HW = np.prod(output[0].shape[2:])
  52. # N x Cout x H x W x (Cin x Kw x Kh + bias)
  53. return N * HW * goc * (gic * np.prod(module.kernel_size) + bias)
  54. @_register_modules(m.ConvTranspose2d)
  55. def count_deconvNd(module, input, output):
  56. return np.prod(input[0].shape) * output[0].shape[1] * np.prod(module.kernel_size)
  57. @_register_modules(m.Linear, qatm.Linear, qm.Linear)
  58. def count_linear(module, input, output):
  59. return np.prod(output[0].shape) * module.in_features
# Module classes that receive a forward hook for stats collection.
# QAT and quantized variants need not be listed: they inherit from the
# float modules below, and isinstance checks cover subclasses.
hook_modules = (
    m.Conv2d,
    m.ConvTranspose2d,
    m.LocalConv2d,
    m.BatchNorm2d,
    m.Linear,
)
  68. def dict2table(list_of_dict, header):
  69. table_data = [header]
  70. for d in list_of_dict:
  71. row = []
  72. for h in header:
  73. v = ""
  74. if h in d:
  75. v = d[h]
  76. row.append(v)
  77. table_data.append(row)
  78. return table_data
  79. def sizeof_fmt(num, suffix="B"):
  80. for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
  81. if abs(num) < 1024.0:
  82. return "{:3.3f} {}{}".format(num, unit, suffix)
  83. num /= 1024.0
  84. sign_str = "-" if num < 0 else ""
  85. return "{}{:.1f} {}{}".format(sign_str, num, "Yi", suffix)
  86. def print_flops_stats(flops, bar_length_max=20):
  87. flops_list = [i["flops_num"] for i in flops]
  88. max_flops_num = max(flops_list + [0])
  89. # calc total flops and set flops_cum
  90. total_flops_num = 0
  91. for d in flops:
  92. total_flops_num += int(d["flops_num"])
  93. d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
  94. for d in flops:
  95. f = d["flops_num"]
  96. d["flops"] = sizeof_fmt(f, suffix="OPs")
  97. r = d["ratio"] = f / total_flops_num
  98. d["percentage"] = "{:.2f}%".format(r * 100)
  99. bar_length = int(f / max_flops_num * bar_length_max)
  100. d["bar"] = "#" * bar_length
  101. header = [
  102. "name",
  103. "class_name",
  104. "input_shapes",
  105. "output_shapes",
  106. "flops",
  107. "flops_cum",
  108. "percentage",
  109. "bar",
  110. ]
  111. total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
  112. total_var_size = sum(
  113. sum(s[1] if len(s) > 1 else 0 for s in d["output_shapes"]) for d in flops
  114. )
  115. flops.append(
  116. dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
  117. )
  118. logger.info("flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header)))
  119. return total_flops_num
  120. def get_param_stats(param: np.ndarray):
  121. nbits = get_dtype_bit(param.dtype.name)
  122. shape = param.shape
  123. param_dim = np.prod(param.shape)
  124. param_size = param_dim * nbits // 8
  125. return {
  126. "shape": shape,
  127. "mean": param.mean(),
  128. "std": param.std(),
  129. "param_dim": param_dim,
  130. "nbits": nbits,
  131. "size": param_size,
  132. }
  133. def print_params_stats(params, bar_length_max=20):
  134. total_param_dims, total_param_size = 0, 0
  135. for d in params:
  136. total_param_dims += int(d["param_dim"])
  137. total_param_size += int(d["size"])
  138. ratio = d["size"] / total_param_size
  139. d["size"] = sizeof_fmt(d["size"])
  140. d["size_cum"] = sizeof_fmt(total_param_size)
  141. d["ratio"] = ratio
  142. d["percentage"] = "{:.2f}%".format(ratio * 100)
  143. # construct bar
  144. max_ratio = max([d["ratio"] for d in params])
  145. for d in params:
  146. bar_length = int(d["ratio"] / max_ratio * bar_length_max)
  147. d["size_bar"] = "#" * bar_length
  148. param_size = sizeof_fmt(total_param_size)
  149. params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
  150. header = [
  151. "name",
  152. "shape",
  153. "mean",
  154. "std",
  155. "param_dim",
  156. "bits",
  157. "size",
  158. "size_cum",
  159. "percentage",
  160. "size_bar",
  161. ]
  162. logger.info(
  163. "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
  164. )
  165. return total_param_dims, total_param_size
  166. def print_summary(**kwargs):
  167. data = [["item", "value"]]
  168. data.extend(list(kwargs.items()))
  169. logger.info("summary\n" + tabulate.tabulate(data))
  170. def module_stats(
  171. model: m.Module,
  172. input_size: int,
  173. bar_length_max: int = 20,
  174. log_params: bool = True,
  175. log_flops: bool = True,
  176. ):
  177. r"""
  178. Calculate and print ``model``'s statistics by adding hook and record Module's inputs outputs size.
  179. :param model: model that need to get stats info.
  180. :param input_size: size of input for running model and calculating stats.
  181. :param bar_length_max: size of bar indicating max flops or parameter size in net stats.
  182. :param log_params: whether print and record params size.
  183. :param log_flops: whether print and record op flops.
  184. """
  185. def module_stats_hook(module, input, output, name=""):
  186. class_name = str(module.__class__).split(".")[-1].split("'")[0]
  187. flops_fun = CALC_FLOPS.get(type(module))
  188. if callable(flops_fun):
  189. flops_num = flops_fun(module, input, output)
  190. if not isinstance(output, (list, tuple)):
  191. output = [output]
  192. flops.append(
  193. dict(
  194. name=name,
  195. class_name=class_name,
  196. input_shapes=[i.shape for i in input],
  197. output_shapes=[o.shape for o in output],
  198. flops_num=flops_num,
  199. flops_cum=0,
  200. )
  201. )
  202. if hasattr(module, "weight") and module.weight is not None:
  203. w = module.weight
  204. param_stats = get_param_stats(w.numpy())
  205. param_stats["name"] = name + "-w"
  206. params.append(param_stats)
  207. if hasattr(module, "bias") and module.bias is not None:
  208. b = module.bias
  209. param_stats = get_param_stats(b.numpy())
  210. param_stats["name"] = name + "-b"
  211. params.append(param_stats)
  212. # multiple inputs to the network
  213. if not isinstance(input_size[0], tuple):
  214. input_size = [input_size]
  215. params = []
  216. flops = []
  217. hooks = []
  218. for (name, module) in model.named_modules():
  219. if isinstance(module, hook_modules):
  220. hooks.append(
  221. module.register_forward_hook(partial(module_stats_hook, name=name))
  222. )
  223. inputs = [zeros(in_size, dtype=np.float32) for in_size in input_size]
  224. model.eval()
  225. model(*inputs)
  226. for h in hooks:
  227. h.remove()
  228. total_flops, total_params = 0, 0
  229. if log_params:
  230. total_param_dims, total_param_size = print_params_stats(params, bar_length_max)
  231. if log_flops:
  232. total_flops = print_flops_stats(flops, bar_length_max)
  233. extra_info = {
  234. "#params": len(params),
  235. "total_param_dims": sizeof_fmt(total_param_dims),
  236. "total_param_size": sizeof_fmt(total_param_size),
  237. "total_flops": sizeof_fmt(total_flops, suffix="OPs"),
  238. "flops/param_size": "{:3.3f}".format(total_flops / total_param_size),
  239. }
  240. print_summary(**extra_info)
  241. return total_params, total_flops

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台