
test_common.h

/**
 * \file test/test_common.h
 *
 * This file is part of MegEngine, a deep learning framework developed by
 * Megvii.
 *
 * \copyright Copyright (c) 2020-2021 Megvii Inc. All rights reserved.
 */

#pragma once

#include "lite_build_config.h"

#if LITE_BUILD_WITH_MGE
#include "../src/misc.h"
#include "../src/mge/network_impl.h"
#include "../src/mge/common.h"
#include "lite/network.h"
#include "lite/tensor.h"

#include "megbrain/tensor.h"
#include "megbrain/graph/bases.h"
#include "megbrain/plugin/opr_io_dump.h"
#include "megbrain/plugin/profiler.h"
#include "megbrain/serialization/extern_c_opr.h"
#include "megbrain/serialization/file.h"
#include "megbrain/serialization/load_dump_config.h"
#include "megbrain/serialization/serializer.h"
#include "megbrain/utils/thin/hash_table.h"
#include "npy.h"

#include <gtest/gtest.h>

#include <string.h>
#include <chrono>
#include <cmath>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <vector>

namespace lite {

// Element-wise comparison of two raw buffers interpreted as type T; fails at
// the first index whose absolute difference exceeds maxerr.
template <typename T>
static ::testing::AssertionResult compare_memory(
        const void* memory0, const void* memory1, size_t length,
        float maxerr = 1e-3) {
    const T* data_ptr0 = static_cast<const T*>(memory0);
    const T* data_ptr1 = static_cast<const T*>(memory1);
    for (size_t i = 0; i < length; i++) {
        auto diff = std::abs(data_ptr0[i] - data_ptr1[i]);
        if (diff > maxerr) {
            return ::testing::AssertionFailure()
                   << "Unequal value:\n"
                   << "value 0 = " << data_ptr0[i] << "\n"
                   << "value 1 = " << data_ptr1[i] << "\n"
                   << "At index: " << i << "\n";
        }
    }
    return ::testing::AssertionSuccess();
}

// Compare two lite tensors element-wise, interpreting their memory as type T.
template <typename T>
void compare_lite_tensor(
        std::shared_ptr<Tensor> tensor0, std::shared_ptr<Tensor> tensor1,
        float maxerr = 1e-3) {
    size_t elemsize = tensor0->get_layout().get_elem_size();
    T* data_ptr0 = static_cast<T*>(tensor0->get_memory_ptr());
    T* data_ptr1 = static_cast<T*>(tensor1->get_memory_ptr());
    size_t length = tensor0->get_tensor_total_size_in_byte() / elemsize;
    EXPECT_TRUE(compare_memory<T>(data_ptr0, data_ptr1, length, maxerr));
}

// Load a .npy file into a CPU lite tensor, inferring shape and data type from
// the numpy header.
__attribute__((unused)) static std::shared_ptr<Tensor> get_input_data(
        std::string path) {
    std::string type_str;
    std::vector<npy::ndarray_len_t> stl_shape;
    std::vector<int8_t> raw;
    npy::LoadArrayFromNumpy(path, type_str, stl_shape, raw);

    auto lite_tensor = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU);
    Layout layout;
    layout.ndim = stl_shape.size();
    const std::map<std::string, LiteDataType> type_map = {
            {"f4", LiteDataType::LITE_FLOAT},
            {"i4", LiteDataType::LITE_INT},
            {"i1", LiteDataType::LITE_INT8},
            {"u1", LiteDataType::LITE_UINT8}};
    layout.shapes[0] = 1;
    for (size_t i = 0; i < stl_shape.size(); i++) {
        layout.shapes[i] = static_cast<size_t>(stl_shape[i]);
    }
    for (auto& item : type_map) {
        if (type_str.find(item.first) != std::string::npos) {
            layout.data_type = item.second;
            break;
        }
    }
    lite_tensor->set_layout(layout);

    size_t length = lite_tensor->get_tensor_total_size_in_byte();
    void* dest = lite_tensor->get_memory_ptr();
    memcpy(dest, raw.data(), length);
    return lite_tensor;
}

// Run `input` through the model at `model_path` with the lite runtime and
// return a CPU copy of the first output tensor.
__attribute__((unused)) static std::shared_ptr<Tensor> mgelite_lar(
        std::string model_path, const Config& config, std::string,
        std::shared_ptr<Tensor> input) {
    std::unique_ptr<Network> network = std::make_unique<Network>(config);
    network->load_model(model_path);

    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
    auto src_ptr = input->get_memory_ptr();
    auto src_layout = input->get_layout();
    input_tensor->reset(src_ptr, src_layout);

    network->forward();
    network->wait();

    std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
    Layout out_layout = output_tensor->get_layout();
    auto ret = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU, out_layout);
    void* out_data = output_tensor->get_memory_ptr();
    void* dst_data = ret->get_memory_ptr();
    memcpy(dst_data, out_data, ret->get_tensor_total_size_in_byte());
    return ret;
}

// Run `input` through the same model with the underlying MegBrain graph
// loader; serves as the reference path when checking the lite runtime.
__attribute__((unused)) static std::shared_ptr<Tensor> mgb_lar(
        std::string model_path, const Config& config, std::string input_name,
        std::shared_ptr<Tensor> input) {
    LITE_ASSERT(config.bare_model_cryption_name.size() == 0);
    using namespace mgb;

    serialization::GraphLoader::LoadConfig mgb_config;
    mgb_config.comp_node_mapper = [config](CompNode::Locator& loc) {
        loc = to_compnode_locator(config.device_type);
    };
    mgb_config.comp_graph = ComputingGraph::make();
    auto&& graph_opt = mgb_config.comp_graph->options();
    if (config.options.weight_preprocess) {
        graph_opt.graph_opt.enable_weight_preprocess();
    }
    graph_opt.comp_node_seq_record_level =
            config.options.comp_node_seq_record_level;

    auto inp_file = mgb::serialization::InputFile::make_fs(model_path.c_str());
    auto format =
            serialization::GraphLoader::identify_graph_dump_format(*inp_file);
    mgb_assert(format.valid(),
               "invalid model: unknown model format, please make sure input "
               "file is generated by GraphDumper");
    auto loader =
            serialization::GraphLoader::make(std::move(inp_file), format.val());
    auto load_ret = loader->load(mgb_config, false);

    // Register a callback per output var that copies the device result back
    // to a host tensor.
    ComputingGraph::OutputSpec out_spec;
    std::vector<HostTensorND> output_tensors(load_ret.output_var_list.size());
    for (size_t i = 0; i < load_ret.output_var_list.size(); i++) {
        auto cb = [&output_tensors, i](const DeviceTensorND& dv) mutable {
            output_tensors[i].copy_from(dv);
        };
        out_spec.emplace_back(load_ret.output_var_list[i], std::move(cb));
    }
    auto func = load_ret.graph_compile(out_spec);

    auto& in = load_ret.tensor_map.find(input_name)->second;
    in->copy_from(*TensorHelper::implement(input)
                           ->cast_final_safe<TensorImplDft>()
                           .host_tensor());
    func->execute();
    func->wait();

    std::shared_ptr<Tensor> ret = std::make_shared<Tensor>(
            LiteDeviceType::LITE_CPU,
            to_lite_layout(output_tensors[0].layout()));
    auto mge_tensor = TensorHelper::implement(ret)
                              ->cast_final_safe<TensorImplDft>()
                              .host_tensor();
    mge_tensor->copy_from(output_tensors[0]);
    return ret;
}

}  // namespace lite

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
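A typical way these helpers are combined in the lite test suite is sketched below: load an input from a .npy file, run it through both the lite runtime (mgelite_lar) and the raw MegBrain loader (mgb_lar), and assert that the outputs agree. The model path, input path, and the input variable name "data" are hypothetical placeholders for illustration, not part of the header above.

    #include "test_common.h"
    using namespace lite;

    TEST(TestNetwork, CompareLiteWithMgb) {
        Config config;
        std::string model_path = "./shufflenet.mge";  // placeholder model file
        std::string input_path = "./input_data.npy";  // placeholder input file

        // Load the input once and feed it to both execution paths.
        std::shared_ptr<Tensor> input = get_input_data(input_path);
        std::shared_ptr<Tensor> result_lite =
                mgelite_lar(model_path, config, "data", input);
        std::shared_ptr<Tensor> result_mgb =
                mgb_lar(model_path, config, "data", input);

        // Both paths return CPU tensors, so they can be compared directly.
        compare_lite_tensor<float>(result_lite, result_mgb);
    }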

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build to choose between. To run GPU programs, make sure the machine has GPU hardware and that the driver is installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit MegStudio.
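In the context of the header above, the same comparison can be pointed at a GPU by changing the device type in the lite Config. This is only a sketch: it assumes a CUDA-enabled build in which LiteDeviceType::LITE_CUDA is available, and it reuses the placeholder model_path and input from the previous example.

    // Sketch: run the lite path on a CUDA device instead of the CPU.
    // Assumes a CUDA-enabled build and a machine with a working GPU driver.
    Config cuda_config;
    cuda_config.device_type = LiteDeviceType::LITE_CUDA;
    auto result_gpu = mgelite_lar(model_path, cuda_config, "data", input);
    // mgelite_lar copies the output back to a CPU tensor, so the GPU result
    // can still be checked with compare_lite_tensor<float>().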