You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

conv_bias.h 6.7 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147
  1. /**
  2. * \file dnn/test/common/conv_bias.h
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #pragma once
  12. #include "megdnn/basic_types.h"
  13. #include "megdnn/opr_param_defs.h"
  14. #include "test/common/checker.h"
  15. #include "src/fallback/conv_bias/opr_impl.h"
  16. #include <regex>
  17. namespace megdnn {
  18. namespace test {
  19. namespace conv_bias {
  20. struct TestArg {
  21. param::ConvBias param;
  22. TensorShape src, filter, bias;
  23. TestArg(param::ConvBias param, TensorShape src, TensorShape filter,
  24. TensorShape bias)
  25. : param(param), src(src), filter(filter), bias(bias) {}
  26. };
//! Test-case generators. Each returns a list of TestArg tuples covering one
//! family of conv_bias configurations; the exact shapes/params live in the
//! corresponding .cpp (not visible here). Names encode the intended layout
//! and dtype family (e.g. int8 + NCHW4) — presumably mirroring the megdnn
//! tensor-format enums; confirm against the definitions.
std::vector<TestArg> get_args();
std::vector<TestArg> get_args_1x1();
std::vector<TestArg> get_chanwise_args();

//! Winograd-algorithm cases, parameterized by kernel size / packing factor.
std::vector<TestArg> get_winograd_args(size_t kernel_size);
std::vector<TestArg> get_winograd_mk_packed_args(size_t pack_size = 4);
std::vector<TestArg> get_quantized_winograd_mk_packed_args(
        size_t pack_size = 4, bool compute_float32 = false);

//! Quantized cases; the first variant restricts to a single nonlinearity mode.
std::vector<TestArg> get_quantized_args_with_nlmode(
        param::ConvBias::NonlineMode nlmode);
std::vector<TestArg> get_quantized_args();

//! int8 NCHW4-layout cases (plus bounds-check / small-channel / small-batch
//! variants), all keyed by kernel size.
std::vector<TestArg> get_int8_nchw4_args(size_t kernel_size);
std::vector<TestArg> get_int8_nchw4_args_check_bounds(size_t kernel_size);
std::vector<TestArg> get_int8_nchw4_small_channel_args(size_t kernel_size);
std::vector<TestArg> get_int8_nchw4_small_channel_args_check_bounds(
        size_t kernel_size);
std::vector<TestArg> get_int8_nchw4_args_small_batch(size_t kernel_size);

//! int8 CHWN4-layout counterparts of the NCHW4 generators above.
std::vector<TestArg> get_int8_chwn4_args(size_t kernel_size);
std::vector<TestArg> get_int8_chwn4_args_check_bounds(size_t kernel_size);
std::vector<TestArg> get_int8_chwn4_small_channel_args(size_t kernel_size);
std::vector<TestArg> get_int8_chwn4_small_channel_args_check_bounds(
        size_t kernel_size);
std::vector<TestArg> get_int8_chwn4_args_small_batch(size_t kernel_size);

//! TensorCore-targeted int8 cases.
std::vector<TestArg> get_int8_nchw4_tensorcore_args(size_t kernel_size);
std::vector<TestArg> get_int8_chwn4_tensorcore_args(size_t kernel_size);

//! int8 NCHW44 cases; optionally float32 accumulation and/or grouped conv.
std::vector<TestArg> get_int8_nchw44_args(size_t kernel_size, size_t pack_size,
                                          bool compute_float32 = false,
                                          bool group_mode = false);
//! Run the given cases through a Checker with an explicit weight-preprocess
//! step, using the named algorithm; type0..type3 set the dtypes of the four
//! tensors (presumably src/filter/bias/dst — confirm in the .cpp), and
//! epsilon is the allowed numeric error.
void check_conv_bias_preprocess(std::vector<conv_bias::TestArg> args,
                                Handle* handle, RNG* rng, float epsilon,
                                DType type0, DType type1, DType type2,
                                DType type3, const char* algo_name);

//! Alias kept for readability at call sites; identical to AlgoChecker<Opr>.
template <typename Opr>
using ConvBiasAlgoChecker = AlgoChecker<Opr>;

//! Check conv_bias with explicit dtypes/format. algo == nullptr means no
//! algorithm restriction; an empty `args` lets the implementation choose its
//! default case list. fuse_z / stable_test toggle extra checking modes.
void check_conv_bias(
        DType src_dtype, DType filter_dtype, DType bias_dtype, DType dst_dtype,
        Handle* handle, const char* algo = nullptr,
        param::ConvBias::Format format = param::ConvBias::Format::NCHW4,
        const std::vector<TestArg>& args = {}, bool fuse_z = false, bool stable_test = false);

#if MEGDNN_WITH_BENCHMARK
//! Benchmark-only helpers (compiled only when benchmarking is enabled).
std::vector<conv_bias::TestArg> get_winograd_benchmark_args(
        size_t kernel, size_t pack_size = 1);
void benchmark_winograd(const char* algo_name, megdnn::Handle* handle,
                        size_t kernel, size_t pack_size = 1);
#endif  // MEGDNN_WITH_BENCHMARK
//! Build general conv_bias cases for the given kernel sizes and stride; the
//! boolean switches prune padding / bias / nonlinearity variants
//! (quantized_nlmod restricts to quantization-friendly nonlinearities,
//! only_broadcast_bias drops full-tensor bias — confirm in the .cpp).
std::vector<megdnn::test::conv_bias::TestArg> get_conv_bias_args(
        std::vector<size_t> kernel, size_t stride, bool no_pad, bool no_bias,
        bool no_nonlinemode, bool quantized_nlmod = false,
        bool only_broadcast_bias = false);

//! 1x1-kernel variant of get_conv_bias_args with the same switches.
std::vector<megdnn::test::conv_bias::TestArg> get_conv_bias_1x1_args(
        bool no_bias, bool no_nonlinemode, bool quantized_nlmod = false,
        bool only_broadcast_bias = false);

//! Run the cases against the named algorithm on the given handle.
void check_conv_bias(std::vector<megdnn::test::conv_bias::TestArg> args,
                     megdnn::Handle* handle, const char* algo_name);

//! Same, specialized for the int8x8x16 dtype combination.
void checker_conv_bias_int8x8x16(
        std::vector<megdnn::test::conv_bias::TestArg> args,
        megdnn::Handle* handle, const char* algo_name);

//! Generic checker taking an explicit RNG, tolerance and the four tensor
//! dtypes (same signature shape as check_conv_bias_preprocess above).
void checker_conv_bias_common(std::vector<conv_bias::TestArg> args,
                              Handle* handle, RNG* rng, float epsilon,
                              DType type0, DType type1, DType type2,
                              DType type3, const char* algo_name);

//! NCHW44-layout case generator: cartesian product of kernel sizes,
//! nonlinearity modes and bias modes, with layout/dot-product toggles.
std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
        std::vector<size_t> kernel_vec,
        std::vector<param::ConvBias::NonlineMode> nlmode_vec,
        std::vector<megdnn::BiasMode> biasmode_vec, size_t stride,
        bool no_pad = false, bool is_input_nchw = false,
        bool is_nchw44_dot = false);

//! int8x8x32 checkers (plain, and with weight preprocessing).
void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
                                     Handle* handle, const char* algo_name);
void checker_conv_bias_int8x8x32_preprocess(
        std::vector<conv_bias::TestArg> args, Handle* handle,
        const char* algo_name);
// Brace-enclosed initializer-list macros used as arguments to the generators
// above (e.g. get_nchw44_conv_bias_args). Kept as macros (not constexpr
// arrays) so each expansion can initialize a std::vector in place; comments
// must stay outside the definitions because of the line continuations.

// All four supported nonlinearity modes.
#define FULL_NLMODE                                \
    {                                              \
        param::ConvBias::NonlineMode::IDENTITY,    \
        param::ConvBias::NonlineMode::RELU,        \
        param::ConvBias::NonlineMode::H_SWISH,     \
        param::ConvBias::NonlineMode::SIGMOID      \
    }
// Nonlinearity modes exercised for quantized runs (no SIGMOID).
#define QUAN_NLMODE                                \
    {                                              \
        param::ConvBias::NonlineMode::IDENTITY,    \
        param::ConvBias::NonlineMode::RELU,        \
        param::ConvBias::NonlineMode::H_SWISH      \
    }
// Identity (no nonlinearity) only.
#define ONLY_IDENTITY_NLMODE \
    { param::ConvBias::NonlineMode::IDENTITY }

// All three bias modes.
#define ALL_BIASMODE                                                       \
    {                                                                      \
        megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS, \
        megdnn::BiasMode::BIAS                                             \
    }
// No-bias plus channel-broadcast bias.
#define BR_AND_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
// No-bias plus full-tensor bias.
#define BR_AND_BIAS_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BIAS }
// Single-mode sets.
#define ONLY_BR_BIASMODE \
    { megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
#define ONLY_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS }
#define ONLY_BIAS_BIASMODE \
    { megdnn::BiasMode::BIAS }
  128. } // namespace conv_bias
  129. } // namespace test
  130. } // namespace megdnn
  131. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台