
warp_perspective.cpp

/**
 * \file dnn/test/naive/warp_perspective.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "test/naive/fixture.h"

#include "megdnn/oprs/cv.h"
#include "megdnn/tensor_format.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/warp_perspective.h"

using namespace megdnn;
using namespace test;

namespace {
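// RNG that fills each 3x3 warp matrix with entries drawn from a mix of
// positive and negative ranges and pins the bottom row of every matrix to
// {1, -1, 5}; it is used for the "nan case" checks further below.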
class NanMatRNG : public RNG {
    void gen(const TensorND& tensor_) override {
        auto& gen = RandomState::generator();
        std::uniform_real_distribution<dt_float32> pdist3(1.9f, 2.1f);
        std::uniform_real_distribution<dt_float32> pdist(0.9f, 1.1f);
        std::uniform_real_distribution<dt_float32> pdisth(0.4f, 0.6f);
        std::uniform_real_distribution<dt_float32> ndist(-1.1f, -0.9f);
        std::uniform_real_distribution<dt_float32> ndist3(-2.1f, -1.9f);
        std::uniform_real_distribution<dt_float32> ndisth(-0.6f, -0.4f);
        std::uniform_int_distribution<int> dice(0, 5);
        float* ptr = tensor_.ptr<dt_float32>();
        auto N = tensor_.layout.shape[0];
        for (size_t n = 0; n < N; ++n) {
            for (size_t i = 0; i < 9; ++i) {
                switch (dice(gen)) {
                    case 0:
                        ptr[i] = pdist3(gen);
                        break;
                    case 1:
                        ptr[i] = pdist(gen);
                        break;
                    case 2:
                        ptr[i] = pdisth(gen);
                        break;
                    case 3:
                        ptr[i] = ndist(gen);
                        break;
                    case 4:
                        ptr[i] = ndist3(gen);
                        break;
                    case 5:
                        ptr[i] = ndisth(gen);
                        break;
                }
            }
            ptr[6] = 1;
            ptr[7] = -1;
            ptr[8] = 5;
            ptr += 9;
        }
    }
};
} // namespace
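// Checks the NCHW4 format against a reference computed in plain NCHW: the
// extra_impl below relayouts the NCHW4 inputs into NCHW buffers, runs
// WarpPerspective with Format::NCHW, and relayouts the result back into the
// NCHW4 output tensor.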
TEST_F(NAIVE, WARP_PERSPECTIVE_NCHW4) {
    using Param = WarpPerspective::Param;

    auto convert_true_format = [](const TensorLayout& layout) {
        if (layout.ndim == 4)
            return layout
                    .reshape({layout[0], layout[1] / 4, layout[2], layout[3], 4})
                    .dimshuffle({0, 1, 4, 2, 3});
        else
            return layout;
    };

    WarpPerspective::Param param;
    auto extra_impl = [&param, this,
                       convert_true_format](const TensorNDArray& tensors) {
        auto warp_perspective = handle()->create_operator<WarpPerspective>();
        warp_perspective->param() = param;
        warp_perspective->param().format = Param::Format::NCHW;

        TensorNDArray nchw_tensors;
        for (size_t i = 0; i < tensors.size(); ++i) {
            auto layout = tensors[i].layout;
            if (layout.dtype.enumv() == DTypeEnum::QuantizedS8)
                layout.dtype = dtype::Int8();
            if (layout.ndim == 5) {
                layout = layout.reshape(
                        {layout[0], layout[1] * layout[4], layout[2], layout[3]});
            }
            nchw_tensors.emplace_back(malloc(layout.span().dist_byte()), layout);
        }
        TensorNDArray nchw4_tensors;
        for (size_t i = 0; i < tensors.size(); ++i) {
            auto layout = convert_true_format(nchw_tensors[i].layout);
            nchw4_tensors.emplace_back(tensors[i].raw_ptr, std::move(layout));
        }

        auto workspace_size = warp_perspective->get_workspace_in_bytes(
                tensors[0].layout, tensors[1].layout, tensors[2].layout);
        dt_byte* workspace_ptr = static_cast<dt_byte*>(malloc(workspace_size));
        Workspace workspace{workspace_ptr, workspace_size};

        auto relayout = handle()->create_operator<RelayoutForward>();
        relayout->exec(nchw4_tensors[0], nchw_tensors[0]);
        relayout->exec(nchw4_tensors[1], nchw_tensors[1]);

        warp_perspective->exec(nchw_tensors[0], nchw_tensors[1], nchw_tensors[2],
                               workspace);

        relayout->exec(nchw_tensors[2], nchw4_tensors[2]);

        free(workspace_ptr);
        for (auto&& tensor : nchw_tensors) {
            free(tensor.raw_ptr);
        }
    };

    Checker<WarpPerspectiveForward> checker(handle());
    WarpPerspectiveMatRNG rng;
    checker.set_rng(1, &rng);
    checker.set_dtype(0, dtype::QuantizedS8(0.1f));
    checker.set_dtype(2, dtype::QuantizedS8(0.1f));
    checker.set_extra_opr_impl(extra_impl);
    for (auto bmode : {WarpPerspective::BorderMode::WRAP,
                       WarpPerspective::BorderMode::REFLECT,
                       WarpPerspective::BorderMode::REPLICATE,
                       WarpPerspective::BorderMode::CONSTANT}) {
        param.border_val = 0.3f;
        param.bmode = bmode;
        param.imode = Param::InterpolationMode::LINEAR;
        param.format = Param::Format::NCHW4;
        checker.set_param(param);
        checker.execs({{2, 1, 10, 11, 4}, {2, 3, 3}, {2, 1, 11, 12, 4}});
        checker.execs({{20, 300, 10, 11, 4}, {20, 3, 3}, {20, 300, 11, 12, 4}});
        checker.execs(
                {{2200, 3, 10, 11, 4}, {2200, 3, 3}, {2200, 3, 11, 12, 4}});
        checker.execs({{1, 25, 25, 25, 4}, {1, 3, 3}, {1, 25, 25, 510, 4}});
        checker.execs({{1, 25, 25, 510, 4}, {1, 3, 3}, {1, 25, 25, 25, 4}});
        checker.execs({{1, 25, 25, 25, 4}, {1, 3, 3}, {1, 25, 51, 51, 4}});
        checker.execs({{1, 25, 51, 51, 4}, {1, 3, 3}, {1, 25, 25, 25, 4}});
        break;
    }
}
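// Exact-value check for the plain NCHW path: a hand-written 1x1x3x3 input and
// 3x3 matrix must produce the listed 2x2 output, first for Uint8 and then for
// Quantized8Asymm with scale 1.4 and zero point 127.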
TEST_F(NAIVE, WARP_PERSPECTIVE) {
    Checker<WarpPerspective> checker(handle(), false);
    WarpPerspective::Param param;
    param.bmode = WarpPerspective::Param::BorderMode::BORDER_REFLECT;
    param.imode = WarpPerspective::Param::InterpolationMode::LINEAR;
    param.format = WarpPerspective::Param::Format::NCHW;

    checker.set_param(param).exect(
            Testcase{TensorValue({1, 1, 3, 3}, dtype::Uint8{},
                                 {131, 255, 180, 245, 8, 0, 10, 3, 178}),
                     TensorValue({1, 3, 3}, dtype::Float32{},
                                 {1.2f, 1.2f, 0.6f, -1.05f, -2.0f, -0.7f, 1.3f,
                                  1.5f, 3.0f}),
                     {}},
            Testcase{{},
                     {},
                     TensorValue({1, 1, 2, 2}, dtype::Uint8{},
                                 {156, 183, 181, 195})});

    checker.set_param(param).exect(
            Testcase{TensorValue({1, 1, 3, 3},
                                 dtype::Quantized8Asymm{
                                         1.4f, static_cast<uint8_t>(127)},
                                 {131, 255, 180, 245, 8, 0, 10, 3, 178}),
                     TensorValue({1, 3, 3}, dtype::Float32{},
                                 {1.2f, 1.2f, 0.6f, -1.05f, -2.0f, -0.7f, 1.3f,
                                  1.5f, 3.0f}),
                     {}},
            Testcase{{},
                     {},
                     TensorValue({1, 1, 2, 2},
                                 dtype::Quantized8Asymm{
                                         1.4f, static_cast<uint8_t>(127)},
                                 {156, 183, 181, 195})});
}
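// The two NAIVE_MULTI_THREADS tests below repeat the NCHW4 and exact-value
// checks above on the multi-threaded naive handle.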
TEST_F(NAIVE_MULTI_THREADS, WARP_PERSPECTIVE_NCHW4) {
    using Param = WarpPerspective::Param;

    auto convert_true_format = [](const TensorLayout& layout) {
        if (layout.ndim == 4)
            return layout
                    .reshape({layout[0], layout[1] / 4, layout[2], layout[3], 4})
                    .dimshuffle({0, 1, 4, 2, 3});
        else
            return layout;
    };

    WarpPerspective::Param param;
    auto extra_impl = [&param, this,
                       convert_true_format](const TensorNDArray& tensors) {
        auto warp_perspective = handle()->create_operator<WarpPerspective>();
        warp_perspective->param() = param;
        warp_perspective->param().format = Param::Format::NCHW;

        TensorNDArray nchw_tensors;
        for (size_t i = 0; i < tensors.size(); ++i) {
            auto layout = tensors[i].layout;
            if (layout.dtype.enumv() == DTypeEnum::QuantizedS8)
                layout.dtype = dtype::Int8();
            if (layout.ndim == 5) {
                layout = layout.reshape(
                        {layout[0], layout[1] * layout[4], layout[2], layout[3]});
            }
            nchw_tensors.emplace_back(malloc(layout.span().dist_byte()), layout);
        }
        TensorNDArray nchw4_tensors;
        for (size_t i = 0; i < tensors.size(); ++i) {
            auto layout = convert_true_format(nchw_tensors[i].layout);
            nchw4_tensors.emplace_back(tensors[i].raw_ptr, std::move(layout));
        }

        auto workspace_size = warp_perspective->get_workspace_in_bytes(
                tensors[0].layout, tensors[1].layout, tensors[2].layout);
        dt_byte* workspace_ptr = static_cast<dt_byte*>(malloc(workspace_size));
        Workspace workspace{workspace_ptr, workspace_size};

        auto relayout = handle()->create_operator<RelayoutForward>();
        relayout->exec(nchw4_tensors[0], nchw_tensors[0]);
        relayout->exec(nchw4_tensors[1], nchw_tensors[1]);

        warp_perspective->exec(nchw_tensors[0], nchw_tensors[1], nchw_tensors[2],
                               workspace);

        relayout->exec(nchw_tensors[2], nchw4_tensors[2]);

        free(workspace_ptr);
        for (auto&& tensor : nchw_tensors) {
            free(tensor.raw_ptr);
        }
    };

    Checker<WarpPerspectiveForward> checker(handle());
    WarpPerspectiveMatRNG rng;
    checker.set_rng(1, &rng);
    checker.set_dtype(0, dtype::QuantizedS8(0.1f));
    checker.set_dtype(2, dtype::QuantizedS8(0.1f));
    checker.set_extra_opr_impl(extra_impl);
    for (auto bmode : {WarpPerspective::BorderMode::WRAP,
                       WarpPerspective::BorderMode::REFLECT,
                       WarpPerspective::BorderMode::REPLICATE,
                       WarpPerspective::BorderMode::CONSTANT}) {
        param.border_val = 0.3f;
        param.bmode = bmode;
        param.imode = Param::InterpolationMode::LINEAR;
        param.format = Param::Format::NCHW4;
        checker.set_param(param);
        checker.execs({{2, 1, 10, 11, 4}, {2, 3, 3}, {2, 1, 11, 12, 4}});
        checker.execs({{20, 300, 10, 11, 4}, {20, 3, 3}, {20, 300, 11, 12, 4}});
        checker.execs(
                {{2200, 3, 10, 11, 4}, {2200, 3, 3}, {2200, 3, 11, 12, 4}});
        checker.execs({{1, 25, 25, 25, 4}, {1, 3, 3}, {1, 25, 25, 510, 4}});
        checker.execs({{1, 25, 25, 510, 4}, {1, 3, 3}, {1, 25, 25, 25, 4}});
        checker.execs({{1, 25, 25, 25, 4}, {1, 3, 3}, {1, 25, 51, 51, 4}});
        checker.execs({{1, 25, 51, 51, 4}, {1, 3, 3}, {1, 25, 25, 25, 4}});
        break;
    }
}
TEST_F(NAIVE_MULTI_THREADS, WARP_PERSPECTIVE) {
    Checker<WarpPerspective> checker(handle(), false);
    WarpPerspective::Param param;
    param.bmode = WarpPerspective::Param::BorderMode::BORDER_REFLECT;
    param.imode = WarpPerspective::Param::InterpolationMode::LINEAR;
    param.format = WarpPerspective::Param::Format::NCHW;

    checker.set_param(param).exect(
            Testcase{TensorValue({1, 1, 3, 3}, dtype::Uint8{},
                                 {131, 255, 180, 245, 8, 0, 10, 3, 178}),
                     TensorValue({1, 3, 3}, dtype::Float32{},
                                 {1.2f, 1.2f, 0.6f, -1.05f, -2.0f, -0.7f, 1.3f,
                                  1.5f, 3.0f}),
                     {}},
            Testcase{{},
                     {},
                     TensorValue({1, 1, 2, 2}, dtype::Uint8{},
                                 {156, 183, 181, 195})});

    checker.set_param(param).exect(
            Testcase{TensorValue({1, 1, 3, 3},
                                 dtype::Quantized8Asymm{
                                         1.4f, static_cast<uint8_t>(127)},
                                 {131, 255, 180, 245, 8, 0, 10, 3, 178}),
                     TensorValue({1, 3, 3}, dtype::Float32{},
                                 {1.2f, 1.2f, 0.6f, -1.05f, -2.0f, -0.7f, 1.3f,
                                  1.5f, 3.0f}),
                     {}},
            Testcase{{},
                     {},
                     TensorValue({1, 1, 2, 2},
                                 dtype::Quantized8Asymm{
                                         1.4f, static_cast<uint8_t>(127)},
                                 {156, 183, 181, 195})});
}
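// NHWCD4 (image2d pack-4) format test: runs Float32, Float16, QuantizedS8 and
// Quantized8Asymm inputs across several border modes, then exercises the
// "nan case" with NanMatRNG and an all-zero matrix RNG.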
TEST_F(NAIVE_MULTI_THREADS, WARP_PERSPECTIVE_FORWARD_HWCD4) {
    auto handle_multi_thread = handle();
    Checker<WarpPerspective> checker(handle(), false);
    TensorFormat img_fmt =
            Image2DPack4TensorFormat::make(2, handle_multi_thread);
    checker.set_fmt(0, img_fmt).set_fmt(2, img_fmt);
    for (auto dtype : std::vector<DType>{
                 dtype::Float32(), dtype::Float16(), dtype::QuantizedS8(4.3f),
                 dtype::Quantized8Asymm(2.4f, static_cast<uint8_t>(10))}) {
        for (auto bmode : {WarpPerspective::BorderMode::WRAP,
                           WarpPerspective::BorderMode::REFLECT,
                           WarpPerspective::BorderMode::CONSTANT,
                           WarpPerspective::BorderMode::REPLICATE,
                           WarpPerspective::BorderMode::CONSTANT}) {
            WarpPerspectiveMatRNG rng;
            checker.set_rng(1, &rng);
            WarpPerspective::Param param;
            param.border_val = 0.3f;
            param.bmode = bmode;
            param.imode = param::WarpPerspective::InterpolationMode::LINEAR;
            param.format = param::WarpPerspective::Format::NHWCD4;
            if (dtype == dtype::Float16()) {
                //! When an error occurs, a result pixel may take the value of
                //! another pixel in the source image, so only the average
                //! error is checked here.
                checker.set_epsilon(2e-1);
                checker.set_max_avg_error(1e-2);
            }
            checker.set_param(param);
            checker.set_dtype(0, dtype);
            checker.set_dtype(2, dtype);
            if (dtype.category() == DTypeCategory::FLOAT) {
                checker.set_dtype(1, dtype);
            } else {
                checker.set_dtype(1, dtype::Float32());
            }
            checker.execs({{2, 10, 1, 11, 4}, {2, 3, 3}, {2, 11, 1, 12, 4}});
            checker.execs({{22, 10, 1, 11, 4}, {22, 3, 3}, {22, 11, 1, 12, 4}});
        }
    }

    // nan case
    NanMatRNG rng_nan;
    UniformFloatRNG rng_zero(0, 0);
    //! NanMatRNG does not support float16, so reset the dtypes to Float32.
    checker.set_dtype(0, dtype::Float32())
            .set_dtype(1, dtype::Float32())
            .set_dtype(2, dtype::Float32());
    for (auto rng : std::vector<RNG*>{&rng_nan, &rng_zero}) {
        param::WarpPerspective param;
        param.bmode = param::WarpPerspective::BorderMode::CONSTANT;
        param.imode = param::WarpPerspective::InterpolationMode::LINEAR;
        param.format = param::WarpPerspective::Format::NHWCD4;
        checker.set_rng(1, rng);
        param.border_val = 1.737;
        checker.set_param(param);
        checker.exec({{10, 10, 1, 11, 4}, {10, 3, 3}, {10, 12, 1, 13, 4}});
    }
}
#if MEGDNN_WITH_BENCHMARK
namespace {
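// Runs each shape set RUNS times on a multi-threaded handle and on a
// single-threaded handle with the given core affinities, then prints per-case
// timings, the speedup, and the speedup per core.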
void benchmark_impl(const typename WarpPerspective::Param& param,
                    std::vector<SmallVector<TensorShape>> shapes, size_t RUNS,
                    TaskExecutorConfig&& multi_thread_config,
                    TaskExecutorConfig&& single_thread_config) {
    std::vector<float> multi_thread_times, single_thread_times;
    {
        auto multi_thread_handle =
                create_cpu_handle(0, true, &multi_thread_config);
        auto benchmarker =
                Benchmarker<WarpPerspective>(multi_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        for (auto shape : shapes) {
            multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    {
        auto single_thread_handle =
                create_cpu_handle(0, true, &single_thread_config);
        auto benchmarker =
                Benchmarker<WarpPerspective>(single_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        for (auto shape : shapes) {
            single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
    printf("core_ids:");
    for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
        printf("%zu ", multi_thread_config.affinity_core_set[i]);
    }
    printf(", Single thread core_id %zu\n",
           single_thread_config.affinity_core_set[0]);
    for (size_t i = 0; i < shapes.size(); i++) {
        auto shape = shapes[i];
        printf("Case: ");
        for (auto sh : shape)
            printf("%s ", sh.to_string().c_str());
        printf("%zu threads time: %f,\n single thread time: "
               "%f. speed up = %f, speedup/cores=%f\n",
               multi_thread_config.nr_thread, multi_thread_times[i],
               single_thread_times[i],
               single_thread_times[i] / multi_thread_times[i],
               single_thread_times[i] / multi_thread_times[i] /
                       multi_thread_config.nr_thread);
    }
}
} // namespace
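// Benchmarks WarpPerspective on NCHW inputs of several sizes resampled to
// 224x224, comparing 4-thread and 2-thread handles against a single thread
// pinned to different cores.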
TEST_F(NAIVE_BENCHMARK_MULTI_THREADS, BENCHMARK_WARP_PERSPECTIVE) {
    constexpr size_t RUNS = 50;
    using BMode = param::WarpPerspective::BorderMode;
    using IMode = param::WarpPerspective::InterpolationMode;

    WarpPerspective::Param param;
    param.border_val = 0.3f;
    param.format = param::WarpPerspective::Format::NCHW;
    param.imode = IMode::INTER_LINEAR;
    param.bmode = BMode::REPLICATE;

    std::vector<SmallVector<TensorShape>> shapes;
    auto bench_case = [&](size_t N, size_t H, size_t W, size_t C) {
        SmallVector<TensorShape> shape{
                {N, C, H, W}, {N, 3, 3}, {N, C, 224, 224}};
        shapes.push_back(shape);
    };
    bench_case(1, 700, 490, 10);
    bench_case(1, 700, 490, 20);
    bench_case(1, 700, 490, 30);
    bench_case(1, 500, 334, 10);
    bench_case(1, 500, 334, 20);
    bench_case(1, 500, 334, 30);
    bench_case(1, 140, 144, 10);
    bench_case(1, 140, 144, 20);
    bench_case(1, 140, 114, 30);

    printf("Benchmark warp perspective\n");
    benchmark_impl(param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}});
    benchmark_impl(param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {7}});
    benchmark_impl(param, shapes, RUNS, {2, {4, 5}}, {1, {4}});
}
#endif

// vim: syntax=cpp.doxygen
