
checker.h 17 kB

/**
 * \file dnn/test/common/checker.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "megdnn/basic_types.h"
#include "megdnn/tensor_iter.h"
#include "test/common/opr_algo_proxy.h"
#include "test/common/opr_proxy.h"
#include "test/common/rng.h"

#include <gtest/gtest.h>

#include <memory>
#include <regex>
#include <unordered_map>

namespace megdnn {
namespace test {

class CheckerHelper {
    // TensorLayoutArray and TensorValueArray should be protected in theory;
    // but a bug in g++-4.9 handles access privileges incorrectly, so we
    // change them to public.
public:
    using TensorValueArray = TensorNDArray;
    using TensorsConstriant = std::function<void(TensorValueArray& tensors)>;
    using ExtraOprImpl = std::function<void(const TensorNDArray&)>;
    using OutputCanonizer = std::function<void(const TensorValueArray&)>;
    static std::shared_ptr<TensorValueArray> alloc_tensors(
            Handle* handle, const TensorLayoutArray& layouts, size_t offset);

    Handle* handle() const { return m_handle_cur; }

protected:
    //! whether to use physically contiguous (i.e. default layout) for naive
    //! impl
    bool m_enable_contig_naive = false;

    bool m_prev_succ = true;
    const char* m_input_tensors_fpath = nullptr;
    thin_function<void()> m_expect_exec_fail;
    std::unique_ptr<Handle> m_handle_naive;
    Handle* m_handle_cur;
    std::unique_ptr<RNG> m_default_rng;
    std::unordered_map<size_t, RNG*> m_rng;
    std::unordered_map<size_t, DType> m_dtype;
    std::unordered_map<size_t, TensorFormat> m_fmt;
    float_t m_epsilon = 1e-3, m_max_avg_error = 1e-3,
            m_max_avg_biased_error = 1e-3;
    float_t m_perf_check_threshold = -1;
    bool m_perf_check = false;
    ExtraOprImpl m_extra_opr_impl;
    OutputCanonizer m_output_canonizer;
    TensorsConstriant m_tensor_constraint;
    /**
     * the offset from the start of malloc'd memory
     *
     * \note \p m_offset extra bytes are allocated when allocating memory for
     *      a tensor, so the tensor data starts at \p m_offset.
     * \warning currently only used for OpenCL
     */
    size_t m_offset = 0;

    CheckerHelper(Handle* handle, bool check_dispatch = true);
    ~CheckerHelper() noexcept;

    using OprExec = std::function<void(const TensorValueArray&)>;

    void do_exec_with_testcases(const TensorValueArray& testcase_in,
                                const TensorValueArray& testcase_out,
                                const OprExec& exec_opr);
    void do_exec(const TensorLayoutArray& user_layouts,
                 const TensorLayoutArray& deduced_layouts,
                 const OprExec& exec_naive, const OprExec& exec_opr);

    void enable_contig_naive() { m_enable_contig_naive = true; }

private:
    std::shared_ptr<TensorValueArray> m_tensors_naive;

    void init_naive_values();
    void copy_tensors_to_device(const TensorValueArray& dest,
                                const TensorValueArray& src);
    void copy_tensors_from_device(const TensorValueArray& dest,
                                  const TensorValueArray& src);
    void check_tensors(const TensorValueArray& expected,
                       const TensorValueArray& computed);
};

template <typename Opr, typename Proxy = OprProxy<Opr>>
class Checker : public CheckerHelper {
public:
    using Param = typename Opr::Param;
    using BeforeExecCallback =
            std::function<void(Opr*, const TensorValueArray&)>;

    Checker(Handle* handle, bool check_dispatch = true)
            : CheckerHelper(handle, check_dispatch), m_param(Param()) {}

    TensorLayoutArray make_layouts(const TensorShapeArray& shapes) {
        TensorLayoutArray layouts(shapes.size());
        for (size_t i = 0; i < shapes.size(); ++i) {
            DType dt = (m_dtype.find(i) != m_dtype.end() ? m_dtype[i]
                                                         : dtype::Float32());
            TensorFormat fmt =
                    (m_fmt.find(i) != m_fmt.end() ? m_fmt[i] : TensorFormat{});
            layouts[i] = TensorLayout(shapes[i], dt, fmt);
        }
        return layouts;
    }

    /*!
     * \brief execute opr on current param/dtype/rng config
     * \param shapes input/output shapes, which would be passed as
     *      arguments to Opr::deduce_layout
     *
     * Checker would construct TensorLayout vectors from shapes and dtypes,
     * and call exec(TensorLayoutArray &).
     */
    Checker& exec(const TensorShapeArray& shapes) {
        exec(make_layouts(shapes));
        return *this;
    }
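    //! Illustrative usage sketch (not part of the original header): the
    //! operator, handle, param and shapes below are hypothetical, but this is
    //! the typical call pattern. The empty trailing shape asks
    //! Opr::deduce_layout to fill in the output layout.
    //!
    //!     Checker<ConvolutionForward> checker(handle);
    //!     checker.set_dtype(0, dtype::Float32())
    //!             .set_dtype(1, dtype::Float32())
    //!             .set_param(param)
    //!             .execs({{2, 3, 16, 16}, {4, 3, 3, 3}, {}});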
    void exec(TensorLayoutArray layouts);

    //! explicitly require argument to be TensorShape
    Checker& execs(const TensorShapeArray& shapes) { return exec(shapes); }

    //! explicitly require argument to be TensorLayout
    Checker& execl(const TensorLayoutArray& layouts) {
        exec(layouts);
        return *this;
    }

    Checker& exect(const TensorValueArray& testcase_in,
                   const TensorValueArray& testcase_out);

    Checker& set_param(Param param) {
        m_param = param;
        opr()->param() = param;
        return *this;
    }
    Checker& set_dtype(size_t idx, DType dtype) {
        m_dtype[idx] = dtype;
        return *this;
    }
    Checker& set_fmt(size_t idx, TensorFormat fmt) {
        m_fmt[idx] = fmt;
        return *this;
    }
    Checker& set_rng(size_t idx, RNG* rng) {
        m_rng[idx] = rng;
        return *this;
    }
    //! max error of a single element
    Checker& set_epsilon(dt_float32 epsilon) {
        m_epsilon = epsilon;
        m_max_avg_error = epsilon;
        m_max_avg_biased_error = epsilon;
        return *this;
    }
    //! max average error; defaults to epsilon
    Checker& set_max_avg_error(dt_float32 error) {
        m_max_avg_error = error;
        return *this;
    }
    //! max average biased error; defaults to epsilon
    Checker& set_max_avg_biased_error(dt_float32 error) {
        m_max_avg_biased_error = error;
        return *this;
    }
    Checker& set_offset(size_t offset) {
        m_offset = offset;
        return *this;
    }
    Checker& set_proxy(const Proxy& proxy) {
        m_naive_proxy = proxy;
        m_cur_proxy = proxy;
        return *this;
    }

    //! set_perf_check and set_perf_check_threshold control the
    //! performance checking behavior.
    //!
    //! If perf_check is on (it defaults to off), the running time of the
    //! current operator and the naive operator would be measured and
    //! checked when calling exec.
    //! The speedup ratio must be larger than perf_check_threshold,
    //! otherwise an error would be reported.
    //! perf_check_threshold must be set in advance since the default value
    //! (which is negative) is invalid.
    Checker& set_perf_check(bool perf_check) {
        m_perf_check = perf_check;
        return *this;
    }
    Checker& set_perf_check_threshold(float perf_check_threshold) {
        m_perf_check_threshold = perf_check_threshold;
        return *this;
    }
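    //! Illustrative sketch (not part of the original header; the shapes and
    //! the 2x threshold are hypothetical): require the tested backend to be
    //! at least twice as fast as the naive implementation for this case.
    //!
    //!     checker.set_perf_check(true)
    //!             .set_perf_check_threshold(2.0f)
    //!             .execs({{32, 64, 56, 56}, {64, 64, 3, 3}, {}});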
    //! load input tensors from file for next run
    Checker& load_input_tensors(const char* fpath) {
        m_input_tensors_fpath = fpath;
        return *this;
    }
    //! add another checker to ensure the naive implementation is correct
    Checker& set_extra_opr_impl(const ExtraOprImpl& chk) {
        m_extra_opr_impl = chk;
        return *this;
    }
    //! set a callback to be invoked before executing the operator
    Checker& set_before_exec_callback(const BeforeExecCallback& cb) {
        m_before_exec_callback = cb;
        return *this;
    }
    //! set a tensor constraint function, used to manipulate the input
    //! tensors before testing
    Checker& set_tensors_constraint(
            const TensorsConstriant& tensor_constraint) {
        m_tensor_constraint = tensor_constraint;
        return *this;
    }
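    //! Illustrative sketch (not part of the original header): a constraint
    //! that overwrites the first input with a fixed value before each run,
    //! assuming it holds dt_float32 data.
    //!
    //!     checker.set_tensors_constraint([](TensorValueArray& tensors) {
    //!         auto ptr = tensors[0].ptr<dt_float32>();
    //!         for (size_t i = 0; i < tensors[0].layout.total_nr_elems(); ++i)
    //!             ptr[i] = 1.f;
    //!     });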
    /*!
     * \brief set that exec() on opr should fail, so naive is not called and
     *      exec() returns directly after opr is called.
     *
     * This is only valid for the next exec() call. It is usually used for
     * testing megcore::AsyncErrorInfo.
     *
     * \param cb callback to be invoked after opr exec (so the error would
     *      not be passed to the destructor)
     */
    Checker& set_expect_exec_fail(const thin_function<void()>& cb) {
        m_expect_exec_fail = cb;
        return *this;
    }

    /*!
     * \brief set a function to canonize the outputs
     *
     * For some oprs, multiple outputs may be acceptable; such a function can
     * transform them into a canonical form before comparison.
     *
     * The arguments are tensors on CPU and should be modified in-place.
     */
    Checker& set_output_canonizer(OutputCanonizer canonizer) {
        m_output_canonizer = std::move(canonizer);
        return *this;
    }

    //! get the opr impl so settings other than param() can be modified
    Opr* opr() {
        if (!m_opr_cur) {
            m_opr_cur = m_handle_cur->create_operator<Opr>();
        }
        return m_opr_cur.get();
    }

    //! whether previous exec succeeded
    bool prev_succ() const { return m_prev_succ; }

private:
    BeforeExecCallback m_before_exec_callback;
    Param m_param;
    Proxy m_naive_proxy, m_cur_proxy;
    std::unique_ptr<Opr> m_opr_cur;
};

::testing::AssertionResult __assert_tensor_eq(
        const char* expr0, const char* expr1, const char* expr_maxerr,
        const char* expr_maxerr_avg, const char* expr_maxerr_avg_biased,
        const TensorND& v0, const TensorND& v1, float maxerr, float maxerr_avg,
        float maxerr_avg_biased);

#define MEGDNN_ASSERT_TENSOR_EQ_EPS_AVG(v0, v1, maxerr, maxerr_avg,          \
                                        maxerr_avg_biased)                   \
    ASSERT_PRED_FORMAT5(::megdnn::test::__assert_tensor_eq, v0, v1, maxerr,  \
                        maxerr_avg, maxerr_avg_biased)

#define MEGDNN_ASSERT_TENSOR_EQ_EPS(v0, v1, maxerr) \
    MEGDNN_ASSERT_TENSOR_EQ_EPS_AVG(v0, v1, maxerr, maxerr, maxerr)

#define MEGDNN_ASSERT_TENSOR_EQ(v0, v1) \
    MEGDNN_ASSERT_TENSOR_EQ_EPS(v0, v1, 1e-3)
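// Illustrative sketch (not part of the original header): inside a test body,
// compare two CPU-side TensorNDs. The tensor names are hypothetical; the
// first form allows a per-element error of 1e-2, the second uses the 1e-3
// default.
//
//     MEGDNN_ASSERT_TENSOR_EQ_EPS(expected_cpu, computed_cpu, 1e-2);
//     MEGDNN_ASSERT_TENSOR_EQ(expected_cpu, computed_cpu);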
template <typename Opr, typename Proxy>
void Checker<Opr, Proxy>::exec(TensorLayoutArray layouts) {
    auto opr_naive = m_handle_naive->create_operator<Opr>();
    auto opr_relayout = m_handle_naive->create_operator<RelayoutForward>();
    auto opr_cur = this->opr();
    opr_naive->param() = m_param;
    opr_cur->param() = m_param;

    m_naive_proxy.deduce_layout(opr_naive.get(), layouts);
    auto exec_naive = [this, &opr_naive, &layouts,
                       &opr_relayout](const TensorValueArray& values) {
        TensorValueArray contig_values = values;
        TensorValueArray real_values = values;
        std::shared_ptr<TensorValueArray> tensors_naive_contig_storage;
        if (m_enable_contig_naive) {
            TensorLayoutArray contig_layouts;
            for (auto&& layout : layouts) {
                contig_layouts.emplace_back(TensorLayout{
                        static_cast<const TensorShape&>(layout),
                        layout.dtype});
            }
            m_naive_proxy.deduce_layout(opr_naive.get(), contig_layouts);
            tensors_naive_contig_storage = alloc_tensors(
                    m_handle_naive.get(), contig_layouts, m_offset);
            contig_values = *tensors_naive_contig_storage;
            //! relayout value to the contig_values
            for (size_t i = 0; i < contig_values.size(); ++i) {
                if (real_values[i].layout.ndim == 0)
                    continue;
                real_values[i].layout.format = {};
                opr_relayout->exec(real_values[i], contig_values[i],
                                   m_handle_naive.get());
            }
        }

        m_naive_proxy.exec(opr_naive.get(), contig_values);

        if (m_enable_contig_naive) {
            //! relayout to the values
            for (size_t i = 0; i < contig_values.size(); ++i) {
                if (real_values[i].layout.ndim == 0)
                    continue;
                opr_relayout->exec(contig_values[i], real_values[i],
                                   m_handle_naive.get());
            }
        }
    };
    auto exec_opr = [this, opr_cur](const TensorValueArray& values) {
        if (m_before_exec_callback) {
            m_before_exec_callback(opr_cur, values);
        }
        m_cur_proxy.exec(opr_cur, values);
    };
    auto user_layouts = layouts;
    do_exec(user_layouts, layouts, exec_naive, exec_opr);
}

template <typename Opr, typename Proxy>
Checker<Opr, Proxy>& Checker<Opr, Proxy>::exect(
        const TensorValueArray& testcase_in,
        const TensorValueArray& testcase_out) {
    auto opr_cur = this->opr();
    opr_cur->param() = m_param;
    auto exec_opr = [this, opr_cur](const TensorValueArray& values) {
        if (m_before_exec_callback) {
            m_before_exec_callback(opr_cur, values);
        }
        m_cur_proxy.exec(opr_cur, values);
    };
    do_exec_with_testcases(testcase_in, testcase_out, exec_opr);
    return *this;
}

template <typename T, typename U>
TensorND TensorValue(const TensorShape& shape, T dtype,
                     std::initializer_list<U> values) {
    TensorND tensor;
    tensor.layout = {shape, dtype};
    tensor.raw_ptr =
            static_cast<dt_byte*>(malloc(tensor.layout.span().dist_byte()));
    megdnn_assert(values.size() == tensor.layout.total_nr_elems());
    auto ptr = tensor.ptr<typename DTypeTrait<T>::ctype>();
    for (const auto& v : values) {
        *ptr++ = typename DTypeTrait<T>::ctype(v);
    }
    return tensor;
}

template <typename T, typename U>
TensorND TensorValueLowbit4(const TensorShape& shape, T dtype,
                            std::vector<U> values) {
    TensorND tensor;
    tensor.layout = {shape, dtype};
    tensor.raw_ptr =
            static_cast<dt_byte*>(malloc(tensor.layout.span().dist_byte()));
    megdnn_assert(values.size() == tensor.layout.total_nr_elems());
    auto ptr = static_cast<U*>(tensor.raw_ptr);
    for (size_t i = 0; i < values.size(); i += 2) {
        U val0 = values[i], val1 = values[i + 1];
        megdnn_assert(val0 >= DTypeTrait<T>::min());
        megdnn_assert(val1 <= DTypeTrait<T>::max());
        ptr[i / 2] = (val0 & 0xF) | (val1 << 4);
    }
    return tensor;
}

class Testcase : public SmallVector<TensorND> {
public:
    using SmallVector<TensorND>::SmallVector;
    ~Testcase() {
        // Suicide
        for (const auto& tensor : *this) {
            if (tensor.raw_ptr) {
                free(tensor.raw_ptr);
            }
        }
    }

    Testcase(const Testcase&) = delete;
    Testcase operator=(const Testcase&) = delete;
};
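// Illustrative sketch (not part of the original header): run an explicit
// testcase through exect(). "checker" is assumed to be a Checker for a
// hypothetical single-input, single-output operator; each Testcase has one
// entry per operator tensor, with {} marking slots that carry no data
// (outputs in the input case, inputs in the expected-output case).
//
//     checker.exect(
//             Testcase{TensorValue({2}, dtype::Float32(), {1.f, 2.f}), {}},
//             Testcase{{}, TensorValue({2}, dtype::Float32(), {2.f, 4.f})});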
/*!
 * \brief a callable to check that a given algorithm is used for the heuristic
 * \param require_algo if its value is true, then requires
 *      get_algorithm_heuristic() to return the expected algo; otherwise the
 *      expected algo must exist in get_all_algorithms() and it would be set
 *      to be used
 */
template <class Opr, typename OprAlgoProxy = OprAlgoProxy<Opr>>
class AlgoChecker {
    std::string m_name;
    typename Opr::Algorithm* m_algo = nullptr;
    bool* m_require_algo;

public:
    AlgoChecker(const char* name, bool* require_algo = nullptr)
            : m_name{name}, m_require_algo{require_algo} {}

    AlgoChecker(typename Opr::Algorithm* algo, bool* require_algo = nullptr)
            : m_algo{algo}, m_require_algo{require_algo} {}

    void operator()(Opr* opr, const CheckerHelper::TensorValueArray& arr) {
        opr->execution_policy().algorithm = nullptr;
        TensorLayoutArray layouts;
        for (auto&& val : arr) {
            layouts.push_back(val.layout);
        }
        if (m_require_algo && *m_require_algo) {
            auto algo = OprAlgoProxy::get_algorithm_heuristic(opr, layouts);
            if (m_name.empty()) {
                ASSERT_EQ(m_algo->name(), algo->name());
            } else {
                ASSERT_TRUE(std::regex_match(
                        algo->name(), std::regex("(" + m_name + ")(.*)")));
            }
        } else {
            if (m_name.empty()) {
                opr->execution_policy().algorithm = m_algo;
                return;
            } else {
                for (auto i : OprAlgoProxy::get_all_algorithms(opr, layouts)) {
                    if (std::regex_match(
                                i->name(),
                                std::regex("(" + m_name + ")(.*)"))) {
                        opr->execution_policy().algorithm = i;
                        return;
                    }
                }
            }
            ASSERT_TRUE(false) << "algorithm " << m_name << " not found";
        }
    }
};
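// Illustrative sketch (not part of the original header): force an algorithm
// whose name matches the given regex prefix for the following runs. The
// operator type, algorithm name and shapes are hypothetical.
//
//     checker.set_before_exec_callback(
//             AlgoChecker<ConvolutionForward>("MATMUL"));
//     checker.execs({{2, 3, 16, 16}, {4, 3, 3, 3}, {}});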
}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen
