You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

pooling.cpp 4.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
  1. /**
  2. * \file dnn/test/x86/pooling.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "test/common/pooling.h"
  12. #include "test/common/benchmarker.h"
  13. #include "test/common/checker.h"
  14. #include "test/x86/fixture.h"
  15. namespace megdnn {
  16. namespace test {
  17. TEST_F(X86, POOLING) {
  18. auto args = pooling::get_args();
  19. for (auto&& arg : args) {
  20. Checker<Pooling> checker(handle());
  21. checker.set_param(arg.param).exec(TensorShapeArray{arg.ishape, {}});
  22. }
  23. }
  24. #if defined(MEGDNN_X86_WITH_MKL_DNN)
  25. TEST_F(X86, POOLING88) {
  26. Checker<Pooling> checker(handle());
  27. auto args = pooling::get_args();
  28. for (auto&& arg : args) {
  29. arg.ishape.ndim = 5;
  30. arg.ishape[1] = (arg.ishape[1] + 7) / 8;
  31. arg.ishape[4] = 8;
  32. arg.param.format = param::Pooling::Format::NCHW88;
  33. checker.set_param(arg.param).exec(TensorShapeArray{arg.ishape, {}});
  34. }
  35. }
  36. TEST_F(X86_MULTI_THREADS, POOLING88) {
  37. Checker<Pooling> checker(handle());
  38. auto args = pooling::get_args();
  39. for (auto&& arg : args) {
  40. arg.ishape.ndim = 5;
  41. arg.ishape[1] = (arg.ishape[1] + 7) / 8;
  42. arg.ishape[4] = 8;
  43. arg.param.format = param::Pooling::Format::NCHW88;
  44. checker.set_param(arg.param).exec(TensorShapeArray{arg.ishape, {}});
  45. }
  46. }
  47. #endif
  48. #if MEGDNN_WITH_BENCHMARK
  49. static void test_x86_megdnn_pooling(Handle* handle) {
  50. constexpr size_t RUNS = 50;
  51. auto rng = std::make_unique<UniformIntRNG>(-127, 127);
  52. Benchmarker<Pooling> benchmarker_pooling(handle);
  53. benchmarker_pooling.set_times(RUNS)
  54. .set_dtype(0, dtype::QuantizedS8(1.2))
  55. .set_display(false)
  56. .set_rng(0, rng.get());
  57. auto run = [&](uint32_t pad, uint32_t stride, uint32_t window_size,
  58. size_t in_number, size_t in_channel, size_t in_height,
  59. size_t in_width) {
  60. TensorLayout dst_layout;
  61. auto opr = handle->create_operator<Pooling>();
  62. opr->param() = {param::Pooling::Mode::MAX,
  63. pad,
  64. pad,
  65. stride,
  66. stride,
  67. window_size,
  68. window_size};
  69. TensorShape shape{in_number, in_channel, in_height, in_width};
  70. opr->deduce_layout({shape, dtype::Int8{}}, dst_layout);
  71. float computation =
  72. dst_layout.total_nr_elems() * window_size * window_size * 1e-9;
  73. auto pooling_used =
  74. benchmarker_pooling
  75. .set_param({param::Pooling::Mode::MAX, pad, pad, stride,
  76. stride, window_size, window_size})
  77. .exec(TensorShapeArray{shape, {}}) /
  78. RUNS;
  79. float through_put = computation / pooling_used * 1e3;
  80. std::cout << "{" << pad << "," << stride << "," << window_size << ","
  81. << in_number << "," << in_channel << "," << in_height << ","
  82. << in_width << "} "
  83. << "use time " << pooling_used << "ms, "
  84. << "through_put " << through_put << "Gops, " << std::endl;
  85. };
  86. for (auto widows_size : {2, 3})
  87. for (auto stride : {2})
  88. for (auto pad : {2})
  89. for (auto n : {1, 3, 4})
  90. for (auto c : {1, 32, 64})
  91. for (auto h_w : {12, 32, 64}) {
  92. run(pad, stride, widows_size, n, c, h_w, h_w);
  93. }
  94. }
// Benchmark pooling on the single-threaded x86 handle; prints per-shape
// timing and throughput to stdout (no assertions).
TEST_F(X86, BENCHMARK_POOLING) {
    test_x86_megdnn_pooling(handle());
}
// Same pooling benchmark, run on the multi-threaded x86 handle for
// comparison with the single-threaded numbers (no assertions).
TEST_F(X86_MULTI_THREADS, BENCHMARK_POOLING) {
    test_x86_megdnn_pooling(handle());
}
  101. #endif
  102. #if defined(MEGDNN_X86_WITH_MKL_DNN)
  103. TEST_F(X86, POOLING_INT8) {
  104. auto args = pooling::get_args();
  105. for (auto&& arg : args) {
  106. Checker<Pooling> checker(handle());
  107. auto rng = std::make_unique<UniformIntRNG>(-127, 127);
  108. checker.set_dtype(0, dtype::Int8()).set_rng(0, rng.get());
  109. checker.set_param(arg.param).exec(TensorShapeArray{arg.ishape, {}});
  110. }
  111. }
  112. TEST_F(X86_MULTI_THREADS, POOLING_INT8) {
  113. auto args = pooling::get_args();
  114. for (auto&& arg : args) {
  115. Checker<Pooling> checker(handle());
  116. auto rng = std::make_unique<UniformIntRNG>(-127, 127);
  117. checker.set_dtype(0, dtype::Int8()).set_rng(0, rng.get());
  118. checker.set_param(arg.param).exec(TensorShapeArray{arg.ishape, {}});
  119. }
  120. }
  121. #endif
  122. } // namespace test
  123. } // namespace megdnn
  124. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台