You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_conv.py 6.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import itertools
  10. import numpy as np
  11. import pytest
  12. import megengine.module as M
  13. from megengine import Parameter, tensor
  14. from megengine.functional.debug_param import (
  15. get_execution_strategy,
  16. set_execution_strategy,
  17. )
  18. from megengine.module import ConvTranspose2d, ConvTranspose3d, LocalConv2d
  19. @pytest.fixture
  20. def reproducible():
  21. old = get_execution_strategy()
  22. set_execution_strategy("HEURISTIC_REPRODUCIBLE")
  23. yield
  24. set_execution_strategy(old)
  25. # NOTE: test in module for convenience. should really test in functional
  26. @pytest.mark.parametrize(
  27. "name",
  28. ["Conv1d", "Conv2d", "Conv3d", "ConvTranspose2d", "ConvTranspose3d", "LocalConv2d"],
  29. )
  30. def test_conv_dtype_promotion(name, reproducible):
  31. N, Ci, Co, K = 2, 16, 32, 3
  32. S = (7,) * int(name[-2])
  33. if "Local" in name:
  34. m = getattr(M, name)(Ci, Co, *S, K)
  35. else:
  36. m = getattr(M, name)(Ci, Co, K)
  37. x = tensor(np.random.random(size=(N, Ci) + S).astype("float16"))
  38. np.testing.assert_equal(m(x).numpy(), m(x.astype("float32")).numpy())
  39. def test_conv_transpose2d():
  40. SH, SW = 3, 1
  41. PH, PW = 2, 0
  42. N, IC, IH, IW = 4, 5, 8, 6
  43. KH, KW = 3, 4
  44. OC = 3
  45. BIAS = False
  46. def getsize(inp, kern, stride):
  47. return (inp - 1) * stride + kern
  48. OH = getsize(IH, KH, SH)
  49. OW = getsize(IW, KW, SW)
  50. inp = np.random.normal(size=(N, IC, IH, IW)).astype(np.float32)
  51. out = np.zeros((N, OC, OH, OW), dtype=np.float32)
  52. weight = np.random.normal(size=(IC, OC, KH, KW)).astype(np.float32)
  53. bias = np.random.normal(size=(1, OC, 1, 1)).astype(np.float32)
  54. # naive calculation use numpy
  55. for n, ic, ih, iw in itertools.product(*map(range, [N, IC, IH, IW])):
  56. oh, ow = ih * SH, iw * SW
  57. out[n, :, oh : oh + KH, ow : ow + KW] += inp[n, ic, ih, iw] * weight[ic]
  58. out = out[:, :, PH : OH - PH, PW : OW - PW]
  59. if BIAS:
  60. out += bias
  61. # megengine conv_transpose2d calculation
  62. conv_transpose2d = ConvTranspose2d(IC, OC, (KH, KW), (SH, SW), (PH, PW), bias=BIAS)
  63. conv_transpose2d.weight = Parameter(weight, dtype=np.float32)
  64. if BIAS:
  65. conv_transpose2d.bias = Parameter(bias, dtype=np.float32)
  66. y = conv_transpose2d(tensor(inp))
  67. np.testing.assert_almost_equal(out, y.numpy(), 2e-6)
  68. def test_local_conv2d():
  69. def test_func(
  70. batch_size,
  71. in_channels,
  72. out_channels,
  73. input_height,
  74. input_width,
  75. kernel_size,
  76. stride,
  77. padding,
  78. dilation,
  79. groups,
  80. ):
  81. local_conv2d = LocalConv2d(
  82. in_channels=in_channels,
  83. out_channels=out_channels,
  84. input_height=input_height,
  85. input_width=input_width,
  86. kernel_size=kernel_size,
  87. stride=stride,
  88. padding=padding,
  89. dilation=dilation,
  90. groups=groups,
  91. )
  92. inputs = np.random.normal(
  93. size=(batch_size, in_channels, input_height, input_width)
  94. ).astype(np.float32)
  95. output_height = (input_height + padding * 2 - kernel_size) // stride + 1
  96. output_width = (input_width + padding * 2 - kernel_size) // stride + 1
  97. weights = local_conv2d.weight.numpy()
  98. outputs = local_conv2d(tensor(inputs))
  99. # naive calculation use numpy
  100. # only test output_height == input_height, output_width == input_width
  101. inputs = np.pad(inputs, ((0, 0), (0, 0), (1, 1), (1, 1)))
  102. expected = np.zeros(
  103. (batch_size, out_channels, output_height, output_width), dtype=np.float32,
  104. )
  105. ic_group_size = in_channels // groups
  106. oc_group_size = out_channels // groups
  107. for n, oc, oh, ow in itertools.product(
  108. *map(range, [batch_size, out_channels, output_height, output_width])
  109. ):
  110. ih, iw = oh * stride, ow * stride
  111. g_id = oc // oc_group_size
  112. expected[n, oc, ih, iw] = np.sum(
  113. inputs[
  114. n,
  115. g_id * ic_group_size : (g_id + 1) * ic_group_size,
  116. ih : ih + kernel_size,
  117. iw : iw + kernel_size,
  118. ]
  119. * weights[g_id, oh, ow, :, :, :, oc % oc_group_size]
  120. )
  121. np.testing.assert_almost_equal(outputs.numpy(), expected, 1e-5)
  122. test_func(10, 4, 4, 5, 5, 3, 1, 1, 1, 1)
  123. test_func(10, 32, 32, 8, 8, 3, 1, 1, 1, 2)
  124. test_func(10, 32, 32, 8, 8, 3, 1, 1, 1, 4)
  125. def test_conv_transpose3d():
  126. def getsize(inp, kernel, stride, dilate):
  127. return (inp - 1) * stride + kernel * dilate - dilate + 1
  128. def test_func(
  129. N,
  130. IC,
  131. ID,
  132. IH,
  133. IW,
  134. OC,
  135. KD,
  136. KH,
  137. KW,
  138. SD,
  139. SH,
  140. SW,
  141. PD,
  142. PH,
  143. PW,
  144. DD,
  145. DH,
  146. DW,
  147. bias=True,
  148. ):
  149. conv_transpose3d = ConvTranspose3d(
  150. in_channels=IC,
  151. out_channels=OC,
  152. kernel_size=(KD, KH, KW),
  153. stride=(SD, SH, SW),
  154. padding=(PD, PH, PW),
  155. dilation=(DD, DH, DW),
  156. bias=bias,
  157. )
  158. OD = getsize(ID, KD, SD, DD)
  159. OH = getsize(IH, KH, SH, DH)
  160. OW = getsize(IW, KW, SW, DW)
  161. inp = np.random.normal(size=(N, IC, ID, IH, IW))
  162. weight = np.random.normal(size=(IC, OC, KD, KH, KW))
  163. out_np = np.zeros((N, OC, OD, OH, OW), dtype=np.float32)
  164. for n, ic, idepth, ih, iw in itertools.product(
  165. *map(range, [N, IC, ID, IH, IW])
  166. ):
  167. od, oh, ow = idepth * SD, ih * SH, iw * SW
  168. out_np[n, :, od : od + KD, oh : oh + KH, ow : ow + KW] += (
  169. inp[n, ic, idepth, ih, iw] * weight[ic]
  170. )
  171. out_np = out_np[:, :, PD : OD - PD, PH : OH - PH, PW : OW - PW]
  172. assert conv_transpose3d.weight.numpy().shape == weight.shape
  173. conv_transpose3d.weight = Parameter(weight)
  174. out_meg = conv_transpose3d.forward(tensor(inp))
  175. np.testing.assert_almost_equal(out_meg.numpy(), out_np, 1e-5)
  176. test_func(4, 3, 8, 16, 16, 8, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1)
  177. test_func(4, 8, 16, 32, 32, 16, 1, 3, 1, 2, 1, 2, 0, 1, 0, 1, 1, 1)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台