
test_common.h

#pragma once

#include "lite_build_config.h"

#if LITE_BUILD_WITH_MGE
#include "../src/mge/common.h"
#include "../src/mge/network_impl.h"
#include "../src/misc.h"
#include "lite/network.h"
#include "lite/tensor.h"
#include "megbrain/graph/bases.h"
#include "megbrain/plugin/opr_io_dump.h"
#include "megbrain/plugin/profiler.h"
#include "megbrain/serialization/extern_c_opr.h"
#include "megbrain/serialization/file.h"
#include "megbrain/serialization/load_dump_config.h"
#include "megbrain/serialization/serializer.h"
#include "megbrain/tensor.h"
#include "megbrain/utils/thin/hash_table.h"
#include "npy.h"

#include <gtest/gtest.h>
#include <string.h>

#include <chrono>
#include <cmath>  // std::abs for floating-point element types
#include <map>    // std::map used in get_input_data
#include <memory>
#include <random>

namespace lite {

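// Element-wise comparison of two raw buffers interpreted as type T; returns a
// gtest assertion failure at the first index whose absolute difference
// exceeds maxerr.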
template <typename T>
static ::testing::AssertionResult compare_memory(
        const void* memory0, const void* memory1, size_t length, float maxerr = 1e-3) {
    const T* data_ptr0 = static_cast<const T*>(memory0);
    const T* data_ptr1 = static_cast<const T*>(memory1);
    for (size_t i = 0; i < length; i++) {
        auto diff = std::abs(data_ptr0[i] - data_ptr1[i]);
        if (diff > maxerr) {
            return ::testing::AssertionFailure()
                   << "Unequal value:\n"
                   << "value 0 = " << data_ptr0[i] << "\n"
                   << "value 1 = " << data_ptr1[i] << "\n"
                   << "At index: " << i << "\n";
        }
    }
    return ::testing::AssertionSuccess();
}
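
// Element-wise comparison of two lite Tensors of element type T, delegating to
// compare_memory with the element count derived from tensor0's layout.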
template <typename T>
void compare_lite_tensor(
        std::shared_ptr<Tensor> tensor0, std::shared_ptr<Tensor> tensor1,
        float maxerr = 1e-3) {
    size_t elemsize = tensor0->get_layout().get_elem_size();
    T* data_ptr0 = static_cast<T*>(tensor0->get_memory_ptr());
    T* data_ptr1 = static_cast<T*>(tensor1->get_memory_ptr());
    size_t length = tensor0->get_tensor_total_size_in_byte() / elemsize;
    EXPECT_TRUE(compare_memory<T>(data_ptr0, data_ptr1, length, maxerr));
}
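
// Load a numpy (.npy) file into a newly allocated CPU lite Tensor, mapping the
// numpy dtype string (e.g. "f4", "i1") to the matching LiteDataType.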
__attribute__((unused)) static std::shared_ptr<Tensor> get_input_data(
        std::string path) {
    std::string type_str;
    std::vector<npy::ndarray_len_t> stl_shape;
    std::vector<int8_t> raw;
    npy::LoadArrayFromNumpy(path, type_str, stl_shape, raw);

    auto lite_tensor = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU);
    Layout layout;
    layout.ndim = stl_shape.size();
    const std::map<std::string, LiteDataType> type_map = {
            {"f4", LiteDataType::LITE_FLOAT},  {"f2", LiteDataType::LITE_HALF},
            {"i8", LiteDataType::LITE_INT64},  {"i4", LiteDataType::LITE_INT},
            {"u4", LiteDataType::LITE_UINT},   {"i2", LiteDataType::LITE_INT16},
            {"u2", LiteDataType::LITE_UINT16}, {"i1", LiteDataType::LITE_INT8},
            {"u1", LiteDataType::LITE_UINT8}};
    layout.shapes[0] = 1;
    for (size_t i = 0; i < stl_shape.size(); i++) {
        layout.shapes[i] = static_cast<size_t>(stl_shape[i]);
    }
    for (auto& item : type_map) {
        if (type_str.find(item.first) != std::string::npos) {
            layout.data_type = item.second;
            break;
        }
    }
    lite_tensor->set_layout(layout);
    size_t length = lite_tensor->get_tensor_total_size_in_byte();
    void* dest = lite_tensor->get_memory_ptr();
    memcpy(dest, raw.data(), length);
    return lite_tensor;
}
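
// Run a model through the lite Network API: bind `input` as input tensor 0,
// run one forward pass, and return a host copy of output tensor 0.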
__attribute__((unused)) static std::shared_ptr<Tensor> mgelite_lar(
        std::string model_path, const Config& config, std::string,
        std::shared_ptr<Tensor> input) {
    std::unique_ptr<Network> network = std::make_unique<Network>(config);
    network->load_model(model_path);

    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
    auto src_ptr = input->get_memory_ptr();
    auto src_layout = input->get_layout();
    input_tensor->reset(src_ptr, src_layout);

    network->forward();
    network->wait();

    std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
    Layout out_layout = output_tensor->get_layout();
    auto ret = std::make_shared<Tensor>(LiteDeviceType::LITE_CPU, out_layout);
    void* out_data = output_tensor->get_memory_ptr();
    void* dst_data = ret->get_memory_ptr();
    memcpy(dst_data, out_data, ret->get_tensor_total_size_in_byte());
    return ret;
}
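
// Reference path: load the same model with the MegBrain GraphLoader directly,
// feed `input` into the tensor named `input_name`, execute the compiled graph,
// and return output 0 wrapped as a CPU lite Tensor.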
__attribute__((unused)) static std::shared_ptr<Tensor> mgb_lar(
        std::string model_path, const Config& config, std::string input_name,
        std::shared_ptr<Tensor> input) {
    LITE_ASSERT(config.bare_model_cryption_name.size() == 0);
    using namespace mgb;

    serialization::GraphLoader::LoadConfig mgb_config;
    mgb_config.comp_node_mapper = [config](CompNode::Locator& loc) {
        loc = to_compnode_locator(config.device_type);
    };
    mgb_config.comp_graph = ComputingGraph::make();
    auto&& graph_opt = mgb_config.comp_graph->options();
    if (config.options.weight_preprocess) {
        graph_opt.graph_opt.enable_weight_preprocess();
    }
    graph_opt.comp_node_seq_record_level = config.options.comp_node_seq_record_level;

    auto inp_file = mgb::serialization::InputFile::make_fs(model_path.c_str());
    auto format = serialization::GraphLoader::identify_graph_dump_format(*inp_file);
    mgb_assert(
            format.valid(),
            "invalid model: unknown model format, please make sure input "
            "file is generated by GraphDumper");
    auto loader = serialization::GraphLoader::make(std::move(inp_file), format.val());
    auto load_ret = loader->load(mgb_config, false);

    ComputingGraph::OutputSpec out_spec;
    std::vector<HostTensorND> output_tensors(load_ret.output_var_list.size());
    for (size_t i = 0; i < load_ret.output_var_list.size(); i++) {
        auto cb = [&output_tensors, i](const DeviceTensorND& dv) mutable {
            output_tensors[i].copy_from(dv);
        };
        out_spec.emplace_back(load_ret.output_var_list[i], std::move(cb));
    }
    auto func = load_ret.graph_compile(out_spec);

    auto& in = load_ret.tensor_map.find(input_name)->second;
    in->copy_from(*TensorHelper::implement(input)
                           ->cast_final_safe<TensorImplDft>()
                           .host_tensor());
    func->execute();
    func->wait();

    std::shared_ptr<Tensor> ret = std::make_shared<Tensor>(
            LiteDeviceType::LITE_CPU, to_lite_layout(output_tensors[0].layout()));
    auto mge_tensor = TensorHelper::implement(ret)
                              ->cast_final_safe<TensorImplDft>()
                              .host_tensor();
    mge_tensor->copy_from(output_tensors[0]);
    return ret;
}

}  // namespace lite

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}