
test_common.h 7.0 kB

/**
 * \file test/test_common.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "lite_build_config.h"

#if LITE_BUILD_WITH_MGE
#include "../src/misc.h"
#include "../src/mge/network_impl.h"
#include "../src/mge/common.h"
#include "lite/network.h"
#include "lite/tensor.h"

#include "megbrain/tensor.h"
#include "megbrain/graph/bases.h"
#include "megbrain/plugin/opr_io_dump.h"
#include "megbrain/plugin/profiler.h"
#include "megbrain/serialization/extern_c_opr.h"
#include "megbrain/serialization/file.h"
#include "megbrain/serialization/load_dump_config.h"
#include "megbrain/serialization/serializer.h"
#include "megbrain/utils/thin/hash_table.h"
#include "npy.h"

#include <gtest/gtest.h>

#include <string.h>
#include <chrono>
#include <memory>
#include <random>

namespace lite {
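// Compare two raw buffers holding `length` elements of type T; returns a gtest
// assertion failure reporting the first index whose absolute difference
// exceeds `maxerr`.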
template <typename T>
static ::testing::AssertionResult compare_memory(
        const void* memory0, const void* memory1, size_t length,
        float maxerr = 1e-3) {
    const T* data_ptr0 = static_cast<const T*>(memory0);
    const T* data_ptr1 = static_cast<const T*>(memory1);
    for (size_t i = 0; i < length; i++) {
        auto diff = std::abs(data_ptr0[i] - data_ptr1[i]);
        if (diff > maxerr) {
            return ::testing::AssertionFailure()
                   << "Unequal value:\n"
                   << "value 0 = " << data_ptr0[i] << "\n"
                   << "value 1 = " << data_ptr1[i] << "\n"
                   << "At index: " << i << "\n";
        }
    }
    return ::testing::AssertionSuccess();
}
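// Element-wise comparison of two lite Tensors, interpreting both buffers as
// type T; the element count is taken from tensor0's layout.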
template <typename T>
void compare_lite_tensor(
        std::shared_ptr<Tensor> tensor0, std::shared_ptr<Tensor> tensor1,
        float maxerr = 1e-3) {
    size_t elemsize = tensor0->get_layout().get_elem_size();
    T* data_ptr0 = static_cast<T*>(tensor0->get_memory_ptr());
    T* data_ptr1 = static_cast<T*>(tensor1->get_memory_ptr());
    size_t length = tensor0->get_tensor_total_size_in_byte() / elemsize;
    EXPECT_TRUE(compare_memory<T>(data_ptr0, data_ptr1, length, maxerr));
}
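// Load a .npy file from `path` into a CPU lite Tensor, inferring the shape
// from the numpy header and the data type from its type string
// ("f4"/"i4"/"i1"/"u1").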
__attribute__((unused)) static std::shared_ptr<Tensor> get_input_data(
        std::string path) {
    std::string type_str;
    std::vector<npy::ndarray_len_t> stl_shape;
    std::vector<int8_t> raw;
    npy::LoadArrayFromNumpy(path, type_str, stl_shape, raw);

    auto lite_tensor = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU);
    Layout layout;
    layout.ndim = stl_shape.size();
    const std::map<std::string, LiteDataType> type_map = {
            {"f4", LiteDataType::LITE_FLOAT},
            {"i4", LiteDataType::LITE_INT},
            {"i1", LiteDataType::LITE_INT8},
            {"u1", LiteDataType::LITE_UINT8}};
    layout.shapes[0] = 1;
    for (size_t i = 0; i < stl_shape.size(); i++) {
        layout.shapes[i] = static_cast<size_t>(stl_shape[i]);
    }
    for (auto& item : type_map) {
        if (type_str.find(item.first) != std::string::npos) {
            layout.data_type = item.second;
            break;
        }
    }
    lite_tensor->set_layout(layout);

    size_t length = lite_tensor->get_tensor_total_size_in_byte();
    void* dest = lite_tensor->get_memory_ptr();
    memcpy(dest, raw.data(), length);
    return lite_tensor;
}
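// Run `input` through the model at `model_path` using the lite Network API and
// return a CPU copy of the first output tensor. The unnamed string parameter
// keeps the signature aligned with mgb_lar below.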
__attribute__((unused)) static std::shared_ptr<Tensor> mgelite_lar(
        std::string model_path, const Config& config, std::string,
        std::shared_ptr<Tensor> input) {
    std::unique_ptr<Network> network = std::make_unique<Network>(config);
    network->load_model(model_path);

    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
    auto src_ptr = input->get_memory_ptr();
    auto src_layout = input->get_layout();
    input_tensor->reset(src_ptr, src_layout);

    network->forward();
    network->wait();

    std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
    Layout out_layout = output_tensor->get_layout();
    auto ret = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU, out_layout);
    void* out_data = output_tensor->get_memory_ptr();
    void* dst_data = ret->get_memory_ptr();
    memcpy(dst_data, out_data, ret->get_tensor_total_size_in_byte());
    return ret;
}
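// Reference path: load and run the same model directly with the MegBrain
// GraphLoader, feeding `input` into the tensor named `input_name`, and return
// the first output as a CPU lite Tensor.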
__attribute__((unused)) static std::shared_ptr<Tensor> mgb_lar(
        std::string model_path, const Config& config, std::string input_name,
        std::shared_ptr<Tensor> input) {
    LITE_ASSERT(config.bare_model_cryption_name.size() == 0);
    using namespace mgb;

    serialization::GraphLoader::LoadConfig mgb_config;
    mgb_config.comp_node_mapper = [config](CompNode::Locator& loc) {
        loc = to_compnode_locator(config.device_type);
    };
    mgb_config.comp_graph = ComputingGraph::make();
    auto&& graph_opt = mgb_config.comp_graph->options();
    if (config.options.weight_preprocess) {
        graph_opt.graph_opt.enable_weight_preprocess();
    }
    graph_opt.comp_node_seq_record_level =
            config.options.comp_node_seq_record_level;

    auto inp_file = mgb::serialization::InputFile::make_fs(model_path.c_str());
    auto format =
            serialization::GraphLoader::identify_graph_dump_format(*inp_file);
    mgb_assert(format.valid(),
               "invalid model: unknown model format, please make sure input "
               "file is generated by GraphDumper");
    auto loader =
            serialization::GraphLoader::make(std::move(inp_file), format.val());
    auto load_ret = loader->load(mgb_config, false);

    ComputingGraph::OutputSpec out_spec;
    std::vector<HostTensorND> output_tensors(load_ret.output_var_list.size());
    for (size_t i = 0; i < load_ret.output_var_list.size(); i++) {
        auto cb = [&output_tensors, i](const DeviceTensorND& dv) mutable {
            output_tensors[i].copy_from(dv);
        };
        out_spec.emplace_back(load_ret.output_var_list[i], std::move(cb));
    }
    auto func = load_ret.graph_compile(out_spec);

    auto& in = load_ret.tensor_map.find(input_name)->second;
    in->copy_from(*TensorHelper::implement(input)
                           ->cast_final_safe<TensorImplDft>()
                           .host_tensor());
    func->execute();
    func->wait();

    std::shared_ptr<Tensor> ret = std::make_shared<Tensor>(
            LiteDeviceType::LITE_CPU,
            to_lite_layout(output_tensors[0].layout()));
    auto mge_tensor = TensorHelper::implement(ret)
                              ->cast_final_safe<TensorImplDft>()
                              .host_tensor();
    mge_tensor->copy_from(output_tensors[0]);
    return ret;
}
}  // namespace lite

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
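Taken together, these helpers support a common test pattern: feed the same numpy input through a model via the lite Network API and via MegBrain directly, then compare the two outputs. A minimal sketch of such a test follows; the file names and the input tensor name "data" are hypothetical and depend on how the model was dumped.

#include "test_common.h"

using namespace lite;

TEST(TestCommon, CompareLiteWithMgb) {
    Config config;
    // Hypothetical paths: any .npy input paired with a matching .mge model.
    auto input = get_input_data("./input.npy");
    auto lite_out = mgelite_lar("./model.mge", config, "data", input);
    auto mgb_out = mgb_lar("./model.mge", config, "data", input);
    compare_lite_tensor<float>(lite_out, mgb_out);
}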
