You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

warp_perspective.cpp 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276
  1. /**
  2. * \file dnn/test/common/warp_perspective.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
  10. * implied.
  11. */
  12. #include "test/common/warp_perspective.h"
  13. #include "test/common/benchmarker.h"
  14. #include "test/common/checker.h"
  15. #include "test/common/task_record_check.h"
  16. using namespace megdnn;
  17. using namespace test;
  18. using namespace warp_perspective;
//! Deliberate no-ops: with the mat_idx proxy every testcase supplies all
//! tensor shapes (src, mat, mat_idx, dst) explicitly, so there is nothing
//! for the proxy to deduce for any of the three operators.
void WarpPerspectiveMatIdxProxy::deduce_layout(WarpPerspective*, TensorLayoutArray&) {}
void WarpPerspectiveMatIdxProxy::deduce_layout(
        WarpPerspectiveBackwardData*, TensorLayoutArray&) {}
void WarpPerspectiveMatIdxProxy::deduce_layout(
        WarpPerspectiveBackwardMat*, TensorLayoutArray&) {}
  24. void WarpPerspectiveMatIdxProxy::exec(
  25. WarpPerspective* opr, const TensorNDArray& tensors) {
  26. if (!W.valid()) {
  27. W = WorkspaceWrapper(opr->handle(), 0);
  28. }
  29. megdnn_assert(tensors.size() == 4);
  30. W.update(opr->get_workspace_in_bytes(
  31. tensors[0].layout, tensors[1].layout, tensors[2].layout,
  32. tensors[3].layout));
  33. opr->exec(tensors[0], tensors[1], tensors[2], tensors[3], W.workspace());
  34. }
  35. void WarpPerspectiveMatIdxProxy::exec(
  36. WarpPerspectiveBackwardData* opr, const TensorNDArray& tensors) {
  37. if (!W.valid()) {
  38. W = WorkspaceWrapper(opr->handle(), 0);
  39. }
  40. megdnn_assert(tensors.size() == 4);
  41. W.update(opr->get_workspace_in_bytes(
  42. tensors[0].layout, tensors[1].layout, tensors[2].layout,
  43. tensors[3].layout));
  44. opr->exec(tensors[0], tensors[1], tensors[2], tensors[3], W.workspace());
  45. }
  46. void WarpPerspectiveMatIdxProxy::exec(
  47. WarpPerspectiveBackwardMat* opr, const TensorNDArray& tensors) {
  48. if (!W.valid()) {
  49. W = WorkspaceWrapper(opr->handle(), 0);
  50. }
  51. megdnn_assert(tensors.size() == 5);
  52. W.update(opr->get_workspace_in_bytes(
  53. tensors[0].layout, tensors[1].layout, tensors[2].layout, tensors[3].layout,
  54. tensors[4].layout));
  55. opr->exec(
  56. tensors[0], tensors[1], tensors[2], tensors[3], tensors[4], W.workspace());
  57. }
//! Build the testcase list for the OpenCV-compatible warp_perspective:
//! NHWC images with side 4/16/64 (i starts at 4, multiplied by 4 while
//! i < 129), channels {1,2,3}, crossed with all border modes and the four
//! cv interpolation modes. Each shape tuple is {src, mat, mat_idx, dst}.
std::vector<TestArg> warp_perspective::get_cv_args() {
    std::vector<TestArg> args;
    // in warp_perspective_cv INTER_AREA == INTER_LINEAR
    using BorderMode = param::WarpPerspective::BorderMode;
    using InterpolationMode = param::WarpPerspective::InterpolationMode;
    param::WarpPerspective cur_param;
    for (size_t i = 4; i < 129; i *= 4) {
        for (size_t ic : {1, 2, 3}) {
            for (BorderMode bmode : {
                         BorderMode::REPLICATE,
                         BorderMode::REFLECT,
                         BorderMode::REFLECT_101,
                         BorderMode::WRAP,
                         BorderMode::CONSTANT,
                 }) {
                for (InterpolationMode imode :
                     {InterpolationMode::NEAREST, InterpolationMode::LINEAR,
                      InterpolationMode::CUBIC, InterpolationMode::LANCZOS4}) {
                    cur_param.bmode = bmode;
                    cur_param.format = param::WarpPerspective::Format::NHWC;
                    cur_param.imode = imode;
                    // same-sized dst: square, wide (i x 2i) and tall (3i x i)
                    args.emplace_back(
                            cur_param, TensorShape{1, i, i, ic}, TensorShape{1, 3, 3},
                            TensorShape{1}, TensorShape{1, i, i, ic});
                    args.emplace_back(
                            cur_param, TensorShape{1, i, i * 2, ic},
                            TensorShape{1, 3, 3}, TensorShape{1},
                            TensorShape{1, i, i * 2, ic});
                    args.emplace_back(
                            cur_param, TensorShape{1, i * 3, i, ic},
                            TensorShape{1, 3, 3}, TensorShape{1},
                            TensorShape{1, i * 3, i, ic});
                    // NOTE(review): border_val is set here and never reset, so
                    // it remains 0.78f for every case emitted after the first
                    // inner iteration (including the three emplaces above on
                    // later passes) — confirm this persistence is intended.
                    cur_param.border_val = 0.78f;
                    // fixed 8x8 dst exercising down-scaling warps
                    args.emplace_back(
                            cur_param, TensorShape{1, i, i, ic}, TensorShape{1, 3, 3},
                            TensorShape{1}, TensorShape{1, 8, 8, ic});
                    args.emplace_back(
                            cur_param, TensorShape{1, i, i * 2, ic},
                            TensorShape{1, 3, 3}, TensorShape{1},
                            TensorShape{1, 8, 8, ic});
                    args.emplace_back(
                            cur_param, TensorShape{1, i * 3, i, ic},
                            TensorShape{1, 3, 3}, TensorShape{1},
                            TensorShape{1, 8, 8, ic});
                }
            }
        }
    }
    return args;
}
  108. void warp_perspective::run_mat_idx_test(Handle* handle) {
  109. constexpr int N_SRC = 5;
  110. Checker<WarpPerspectiveForward, WarpPerspectiveMatIdxProxy> checker(handle);
  111. WarpPerspectiveMatRNG mat_rng;
  112. checker.set_rng(1, &mat_rng);
  113. UniformIntRNG mat_idx_rng{0, N_SRC - 1};
  114. checker.set_dtype(2, dtype::Int32());
  115. checker.set_rng(2, &mat_idx_rng);
  116. WarpPerspective::Param param;
  117. param.bmode = WarpPerspective::Param::BorderMode::REFLECT;
  118. param.imode = param::WarpPerspective::InterpolationMode::LINEAR;
  119. checker.set_param(param);
  120. checker.execs({{N_SRC, 3, 10, 11}, {2, 3, 3}, {2}, {2, 3, 11, 12}});
  121. checker.execs({{N_SRC, 14, 17, 13}, {123, 3, 3}, {123}, {123, 14, 16, 15}});
  122. // test NHWC
  123. param.format = WarpPerspective::Param::Format::NHWC;
  124. checker.set_param(param)
  125. .set_rng(2, &mat_idx_rng)
  126. .set_epsilon(1e-1)
  127. .set_dtype(2, dtype::Int32());
  128. checker.execs({{N_SRC, 10, 11, 3}, {2, 3, 3}, {2}, {2, 11, 12, 3}});
  129. }
  130. void warp_perspective::run_int8_test_record(int debug_level) {
  131. using Param = WarpPerspective::Param;
  132. TaskRecordChecker<WarpPerspectiveForward> checker(debug_level);
  133. UniformIntRNG input_rng{-128, 127};
  134. WarpPerspectiveMatRNG mat_rng;
  135. class ResizeBy2xMatRNG : public RNG {
  136. void gen(const TensorND& tensor_) override {
  137. float* ptr = tensor_.ptr<float>();
  138. auto N = tensor_.layout.shape[0];
  139. megdnn_assert(
  140. tensor_.layout.is_contiguous() && tensor_.layout.ndim == 3 &&
  141. tensor_.layout[1] == 3 && tensor_.layout[2] == 3);
  142. for (size_t n = 0; n < N; ++n) {
  143. // | 1 0 0 |
  144. // mat = | 0 1 0 |
  145. // | 0 0 2 |
  146. // resize_2x
  147. ptr[0] = ptr[4] = 1;
  148. ptr[8] = 2;
  149. ptr[1] = ptr[2] = ptr[3] = ptr[5] = ptr[6] = ptr[7] = 0;
  150. ptr += 9;
  151. }
  152. }
  153. } resize_2x_mat_rng;
  154. checker.set_rng(0, &input_rng)
  155. .set_rng(1, &mat_rng)
  156. .set_dtype(0, dtype::Int8())
  157. .set_dtype(1, dtype::Float32())
  158. .set_dtype(2, dtype::Int8())
  159. .set_param(
  160. {Param::InterpolationMode::LINEAR, Param::BorderMode::CONSTANT,
  161. Param::Format::NCHW, 0.f});
  162. checker.execs({{99, 48, 17, 17}, {99, 3, 3}, {99, 48, 22, 22}})
  163. .execs({{12, 3, 224, 224}, {12, 3, 3}, {12, 3, 256, 256}});
  164. checker.set_rng(1, &resize_2x_mat_rng);
  165. checker.execs({{98, 48, 17, 17}, {98, 3, 3}, {98, 48, 34, 34}})
  166. .execs({{13, 3, 224, 224}, {13, 3, 3}, {13, 3, 448, 448}});
  167. }
  168. void warp_perspective::run_int8_test(Handle* handle) {
  169. using Param = WarpPerspective::Param;
  170. Checker<WarpPerspectiveForward> checker(handle);
  171. UniformIntRNG input_rng{-128, 127};
  172. WarpPerspectiveMatRNG mat_rng;
  173. class ResizeBy2xMatRNG : public RNG {
  174. void gen(const TensorND& tensor_) override {
  175. float* ptr = tensor_.ptr<float>();
  176. auto N = tensor_.layout.shape[0];
  177. megdnn_assert(
  178. tensor_.layout.is_contiguous() && tensor_.layout.ndim == 3 &&
  179. tensor_.layout[1] == 3 && tensor_.layout[2] == 3);
  180. for (size_t n = 0; n < N; ++n) {
  181. // | 1 0 0 |
  182. // mat = | 0 1 0 |
  183. // | 0 0 2 |
  184. // resize_2x
  185. ptr[0] = ptr[4] = 1;
  186. ptr[8] = 2;
  187. ptr[1] = ptr[2] = ptr[3] = ptr[5] = ptr[6] = ptr[7] = 0;
  188. ptr += 9;
  189. }
  190. }
  191. } resize_2x_mat_rng;
  192. if (handle->type() == Handle::HandleType::CUDA) {
  193. // As currently the computation is performed in floating points instead
  194. // of full int, it could be slightly different on GPU.
  195. checker.set_epsilon(1.1).set_max_avg_error(7e-5);
  196. }
  197. checker.set_rng(0, &input_rng)
  198. .set_rng(1, &mat_rng)
  199. .set_dtype(0, dtype::Int8())
  200. .set_dtype(1, dtype::Float32())
  201. .set_dtype(2, dtype::Int8())
  202. .set_param(
  203. {Param::InterpolationMode::LINEAR, Param::BorderMode::CONSTANT,
  204. Param::Format::NCHW, 0.f});
  205. checker.execs({{99, 48, 17, 17}, {99, 3, 3}, {99, 48, 22, 22}})
  206. .execs({{12, 3, 224, 224}, {12, 3, 3}, {12, 3, 256, 256}});
  207. checker.set_rng(1, &resize_2x_mat_rng);
  208. checker.execs({{98, 48, 17, 17}, {98, 3, 3}, {98, 48, 34, 34}})
  209. .execs({{13, 3, 224, 224}, {13, 3, 3}, {13, 3, 448, 448}});
  210. }
  211. void warp_perspective::run_quint8_test(Handle* handle) {
  212. using Param = WarpPerspective::Param;
  213. Checker<WarpPerspectiveForward> checker(handle);
  214. UniformIntRNG input_rng{0, 255};
  215. WarpPerspectiveMatRNG mat_rng;
  216. class ResizeBy2xMatRNG : public RNG {
  217. void gen(const TensorND& tensor_) override {
  218. float* ptr = tensor_.ptr<float>();
  219. auto N = tensor_.layout.shape[0];
  220. megdnn_assert(
  221. tensor_.layout.is_contiguous() && tensor_.layout.ndim == 3 &&
  222. tensor_.layout[1] == 3 && tensor_.layout[2] == 3);
  223. for (size_t n = 0; n < N; ++n) {
  224. // | 1 0 0 |
  225. // mat = | 0 1 0 |
  226. // | 0 0 2 |
  227. // resize_2x
  228. ptr[0] = ptr[4] = 1;
  229. ptr[8] = 2;
  230. ptr[1] = ptr[2] = ptr[3] = ptr[5] = ptr[6] = ptr[7] = 0;
  231. ptr += 9;
  232. }
  233. }
  234. } resize_2x_mat_rng;
  235. if (handle->type() == Handle::HandleType::CUDA) {
  236. // As currently the computation is performed in floating points instead
  237. // of full int, it could be slightly different on GPU.
  238. checker.set_epsilon(1.1).set_max_avg_error(7e-5);
  239. }
  240. checker.set_rng(0, &input_rng)
  241. .set_rng(1, &mat_rng)
  242. .set_dtype(0, dtype::Quantized8Asymm(0.6f, static_cast<uint8_t>(127)))
  243. .set_dtype(1, dtype::Float32())
  244. .set_dtype(2, dtype::Quantized8Asymm(0.6f, static_cast<uint8_t>(127)))
  245. .set_param(
  246. {Param::InterpolationMode::LINEAR, Param::BorderMode::CONSTANT,
  247. Param::Format::NCHW, 0.f});
  248. checker.execs({{99, 48, 17, 17}, {99, 3, 3}, {99, 48, 22, 22}})
  249. .execs({{12, 3, 224, 224}, {12, 3, 3}, {12, 3, 256, 256}});
  250. checker.set_rng(1, &resize_2x_mat_rng);
  251. checker.execs({{98, 48, 17, 17}, {98, 3, 3}, {98, 48, 34, 34}})
  252. .execs({{13, 3, 224, 224}, {13, 3, 3}, {13, 3, 448, 448}});
  253. }
  254. // vim: syntax=cpp.doxygen