

/**
 * \file dnn/test/arm_common/elemwise.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "test/common/elemwise.h"
#include "test/arm_common/fixture.h"

#include "test/common/benchmarker.h"
#include "test/common/checker.h"

#include "megdnn/oprs/general.h"

using namespace megdnn;
using namespace test;

template <typename tag>
class ARM_ELEMWISE : public ARM_COMMON {};
TYPED_TEST_CASE(ARM_ELEMWISE, elemwise::test_types);
TYPED_TEST(ARM_ELEMWISE, run) {
    elemwise::run_test<TypeParam>(this->handle());
}
TEST_F(ARM_COMMON, ELEMWISE_FORWARD_TERNARY) {
    using Mode = ElemwiseForward::Param::Mode;
    Checker<ElemwiseForward> checker(handle());
    checker.set_param(Mode::FUSE_MUL_ADD3);

    auto run = [&] {
        //! nchw44: operands 0 and 2 broadcast along the spatial dims
        checker.execs({{1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.execs({{1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.execs({{1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
        checker.execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.execs({{1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});
        //! nchw44: operand 1 broadcast along the spatial dims
        checker.execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {}});
        checker.execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {}});
        checker.execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {}});
        checker.execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {}});
        //! generic layouts and broadcasts
        checker.execs({{3, 4, 7}, {3, 4, 7}, {3, 4, 7}, {}});
        checker.execs({{1, 4, 1, 1}, {3, 4, 5, 7}, {1, 4, 1, 1}, {}});
        checker.execs({{1, 4, 1}, {3, 4, 7}, {1, 4, 1}, {}});
        checker.execs({{3, 4, 5, 7}, {3, 4, 5, 7}, {1, 1, 1, 1}, {}});
        checker.execs({{1, 7}, {1, 7}, {1, 7}, {}});
        checker.execs({{1, 2, 1}, {1, 2, 2}, {1, 2, 1}, {}});
        checker.execs({{1, 2, 2}, {1, 2, 2}, {1, 1, 1}, {}});
        checker.execs({{3, 4, 1}, {3, 4, 1}, {3, 4, 1}, {}});
        checker.execs({{3, 4, 5}, {1}, {1}, {}});
        checker.execs({{1}, {3, 4, 5}, {1}, {}});
    };

    // case int
    checker.set_dtype(0, dtype::Int8());
    checker.set_dtype(1, dtype::Int8());
    checker.set_dtype(2, dtype::Int8());
    run();

    checker.set_dtype(0, dtype::Int16());
    checker.set_dtype(1, dtype::Int16());
    checker.set_dtype(2, dtype::Int16());
    run();

    checker.set_dtype(0, dtype::Int32());
    checker.set_dtype(1, dtype::Int32());
    checker.set_dtype(2, dtype::Int32());
    run();

    // case float
    UniformFloatRNG rng(1e-5, 7e1);
    checker.set_rng(0, &rng);
    checker.set_epsilon(1e-5);
    checker.set_dtype(0, dtype::Float32());
    checker.set_dtype(1, dtype::Float32());
    checker.set_dtype(2, dtype::Float32());
    run();

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    // case half
    UniformFloatRNG rng_float16(1, 10);
    checker.set_rng(0, &rng_float16);
    checker.set_epsilon(1e-2);
    checker.set_dtype(0, dtype::Float16());
    checker.set_dtype(1, dtype::Float16());
    checker.set_dtype(2, dtype::Float16());
    run();
#endif
}
TEST_F(ARM_COMMON, ELEMWISE_FORWARD_NCHW44_INT8_INT16_INT32) {
    using Mode = ElemwiseForward::Param::Mode;
    Checker<ElemwiseForward> checker(handle());
    auto run = [&]() {
        // VEC_BCAST101x not PowOp
        checker.set_param(Mode::ADD).execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::ADD).execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::ADD).execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
        checker.set_param(Mode::ADD).execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(Mode::ADD).execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});
        checker.set_param(Mode::RMULH).execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::RMULH).execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::RMULH).execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
        checker.set_param(Mode::RMULH).execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(Mode::RMULH).execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});
        // BCAST101x_VEC not PowOp
        checker.set_param(Mode::ADD).execs({{1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {}});
        checker.set_param(Mode::ADD).execs({{1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {}});
        checker.set_param(Mode::ADD).execs({{1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {}});
        checker.set_param(Mode::ADD).execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(Mode::ADD).execs({{1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(Mode::FUSE_ADD_RELU)
                .execs({{1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {}});
    };
    checker.set_dtype(0, dtype::Int8());
    checker.set_dtype(1, dtype::Int8());
    run();
    checker.set_dtype(0, dtype::Int16());
    checker.set_dtype(1, dtype::Int16());
    run();
    checker.set_dtype(0, dtype::Int32());
    checker.set_dtype(1, dtype::Int32());
    run();
}
TEST_F(ARM_COMMON, ELEMWISE_FORWARD_NCHW44_FP32) {
    using Mode = ElemwiseForward::Param::Mode;
    Checker<ElemwiseForward> checker(handle());

    UniformFloatRNG rng(1e-5, 7e1);
    checker.set_rng(0, &rng);
    checker.set_epsilon(1e-5);
    checker.set_dtype(0, dtype::Float32());
    checker.set_dtype(1, dtype::Float32());

    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
    checker.set_param(Mode::FUSE_ADD_RELU)
            .execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});

    auto run = [&](Mode mode) {
        // VEC_BCAST101x
        checker.set_param(mode).execs({{1, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(mode).execs({{2, 3, 2, 2, 4}, {1, 3, 1, 1, 4}, {}});
        checker.set_param(mode).execs({{3, 8, 5, 3, 4}, {1, 8, 1, 1, 4}, {}});
        checker.set_param(mode).execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(mode).execs({{1, 2, 5, 7, 4}, {1, 2, 1, 1, 4}, {}});
        // BCAST101x_VEC not PowOp
        checker.set_param(mode).execs({{1, 3, 1, 1, 4}, {1, 3, 2, 2, 4}, {}});
        checker.set_param(mode).execs({{1, 3, 1, 1, 4}, {2, 3, 2, 2, 4}, {}});
        checker.set_param(mode).execs({{1, 8, 1, 1, 4}, {3, 8, 5, 3, 4}, {}});
        checker.set_param(mode).execs({{3, 4, 5, 7, 4}, {3, 4, 5, 7, 4}, {}});
        checker.set_param(mode).execs({{1, 2, 1, 1, 4}, {1, 2, 5, 7, 4}, {}});
    };
    run(Mode::ADD);
    run(Mode::FUSE_ADD_H_SWISH);
    run(Mode::FUSE_ADD_RELU);
    run(Mode::MAX);
    run(Mode::MIN);
    run(Mode::MUL);
    run(Mode::SUB);
    run(Mode::TRUE_DIV);
    run(Mode::POW);
}
#if MEGDNN_WITH_BENCHMARK
namespace {
void run_elemwise_benchmark(const TensorShapeArray& shapes,
                            param::Elemwise::Mode mode, const char* mode_str,
                            DType type, Handle* handle_bench) {
    auto handle_fallback = create_cpu_handle(1);
    Benchmarker<Elemwise> benchmarker_bench(handle_bench);
    Benchmarker<Elemwise> benchmarker_fallback(handle_fallback.get());

    // accumulate source bytes for the memory-throughput figure
    float throughput = 0;
    SmallVector<TensorLayout> layouts;
    std::string src_strs;
    for (size_t i = 0; i < shapes.size(); i++) {
        layouts.emplace_back(shapes[i], type);
        throughput += layouts.back().span().dist_byte();
        src_strs += layouts.back().to_string();
        if (i != shapes.size() - 1) {
            src_strs += ",";
        }
    }

    constexpr size_t RUN = 50;
    benchmarker_fallback.set_times(RUN).set_display(false);
    benchmarker_bench.set_times(RUN).set_display(false);
    benchmarker_fallback.set_param(mode);
    benchmarker_bench.set_param(mode);

    TensorLayout dst_layout;
    auto opr = handle_bench->create_operator<Elemwise>();
    opr->param() = mode;
    opr->deduce_layout(layouts, dst_layout);

    // roughly one operation per output element for each additional input
    float computations = dst_layout.total_nr_elems() *
                         (std::max<size_t>(shapes.size(), 2) - 1);
    throughput += dst_layout.span().dist_byte();
    computations *= (1e3 / (1024.0 * 1024));
    throughput *= (1e3 / (1024.0 * 1024));

    layouts.emplace_back(dst_layout);
    auto fallback_time = benchmarker_fallback.execl(layouts) / RUN;
    auto bench_time = benchmarker_bench.execl(layouts) / RUN;

    float fallback_flops = computations / fallback_time;
    float bench_flops = computations / bench_time;
    float fallback_thr = throughput / fallback_time;
    float bench_thr = throughput / bench_time;

    printf("%s = %s (type: %s, mode: %s) cpu=%fMFLOPS %fMB/s, bench=%fMFLOPS "
           "%fMB/s "
           "computations: %fx, throughput: %fx\n",
           src_strs.c_str(), dst_layout.to_string().c_str(), type.name(),
           mode_str, fallback_flops, fallback_thr, bench_flops, bench_thr,
           bench_flops / fallback_flops, bench_thr / fallback_thr);
}
}  // namespace
#define INT_RUN(shape, mode)                                                \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int8{}, handle());    \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int16{}, handle());   \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int32{}, handle());

#define FLOAT_RUN(shape, mode)                                              \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Float32{}, handle()); \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Float16{}, handle());

#define BENCHMARK_CASES(shape) \
    INT_BENCHMARK_CASES(shape) \
    FLOAT_BENCHMARK_CASES(shape)
TEST_F(ARM_COMMON, BENCHMARK_UNARY) {
#define INT_BENCHMARK_CASES(shape) \
    INT_RUN(shape, Mode::RELU);    \
    INT_RUN(shape, Mode::ABS);

#define FLOAT_BENCHMARK_CASES(shape) \
    FLOAT_RUN(shape, Mode::RELU);    \
    FLOAT_RUN(shape, Mode::ABS);     \
    FLOAT_RUN(shape, Mode::SIGMOID); \
    FLOAT_RUN(shape, Mode::EXP);     \
    FLOAT_RUN(shape, Mode::TANH);    \
    FLOAT_RUN(shape, Mode::FAST_TANH);

    using Mode = param::Elemwise::Mode;
    BENCHMARK_CASES({{10000}});
    BENCHMARK_CASES({{50000}});
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}
TEST_F(ARM_COMMON, BENCHMARK_BINARY) {
#define INT_BENCHMARK_CASES(shape) \
    INT_RUN(shape, Mode::MIN);     \
    INT_RUN(shape, Mode::MAX);     \
    INT_RUN(shape, Mode::ADD);     \
    INT_RUN(shape, Mode::SUB);     \
    INT_RUN(shape, Mode::MUL);     \
    INT_RUN(shape, Mode::RMULH);   \
    INT_RUN(shape, Mode::FUSE_ADD_RELU);

#define FLOAT_BENCHMARK_CASES(shape) \
    FLOAT_RUN(shape, Mode::MIN);     \
    FLOAT_RUN(shape, Mode::MAX);     \
    FLOAT_RUN(shape, Mode::ADD);     \
    FLOAT_RUN(shape, Mode::SUB);     \
    FLOAT_RUN(shape, Mode::MUL);     \
    FLOAT_RUN(shape, Mode::POW);     \
    FLOAT_RUN(shape, Mode::TRUE_DIV); \
    FLOAT_RUN(shape, Mode::FUSE_ADD_RELU);

    using Mode = param::Elemwise::Mode;
    TensorShapeArray shapes = {{1, 112, 28, 28}, {1, 112, 28, 28}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 16, 1, 1}, {1, 16, 112, 112}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 448, 7, 7}, {1, 448, 7, 7}};
    BENCHMARK_CASES(shapes);
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}
TEST_F(ARM_COMMON, BENCHMARK_TERNARY_FMA3) {
#define INT_BENCHMARK_CASES(shape) INT_RUN(shape, Mode::FUSE_MUL_ADD3);
#define FLOAT_BENCHMARK_CASES(shape) FLOAT_RUN(shape, Mode::FUSE_MUL_ADD3);

    using Mode = param::Elemwise::Mode;
    TensorShapeArray shapes = {{30, 40, 70}, {30, 40, 70}, {30, 40, 70}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 4, 1, 1}, {3, 4, 5, 7}, {1, 4, 1, 1}};
    BENCHMARK_CASES(shapes);
    shapes = {{3, 4, 5, 7}, {3, 4, 5, 7}, {1, 1, 1, 1}};
    BENCHMARK_CASES(shapes);
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}

#undef BENCHMARK_CASES
#undef INT_RUN
#undef FLOAT_RUN
#endif

// vim: syntax=cpp.doxygen
