You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

reset_io.cpp 2.9 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192
  1. #include "example.h"
  2. #if LITE_BUILD_WITH_MGE
  3. using namespace lite;
  4. using namespace example;
  5. namespace {
  6. bool reset_input(const Args& args) {
  7. std::string network_path = args.model_path;
  8. std::string input_path = args.input_path;
  9. lite::Config config;
  10. //! create and load the network
  11. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  12. network->load_model(network_path);
  13. //! set input data to input tensor
  14. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  15. auto layout = input_tensor->get_layout();
  16. auto src_tensor = parse_npy(input_path);
  17. void* src = src_tensor->get_memory_ptr();
  18. input_tensor->reset(src, layout);
  19. //! forward
  20. network->forward();
  21. network->wait();
  22. //! 6. get the output data or read tensor set in network_in
  23. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  24. void* out_data = output_tensor->get_memory_ptr();
  25. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  26. output_tensor->get_layout().get_elem_size();
  27. float max = -1.0f;
  28. float sum = 0.0f;
  29. for (size_t i = 0; i < out_length; i++) {
  30. float data = static_cast<float*>(out_data)[i];
  31. sum += data;
  32. if (max < data)
  33. max = data;
  34. }
  35. printf("max=%e, sum=%e\n", max, sum);
  36. return true;
  37. }
  38. bool reset_input_output(const Args& args) {
  39. std::string network_path = args.model_path;
  40. std::string input_path = args.input_path;
  41. lite::Config config;
  42. //! create and load the network
  43. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  44. network->load_model(network_path);
  45. //! set input data to input tensor
  46. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  47. auto layout = input_tensor->get_layout();
  48. auto src_tensor = parse_npy(input_path);
  49. void* src = src_tensor->get_memory_ptr();
  50. input_tensor->reset(src, layout);
  51. //! set output ptr to store the network output
  52. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  53. auto result_tensor = std::make_shared<Tensor>(
  54. LiteDeviceType::LITE_CPU, Layout{{1, 1000}, 2, LiteDataType::LITE_FLOAT});
  55. void* out_data = result_tensor->get_memory_ptr();
  56. output_tensor->reset(out_data, result_tensor->get_layout());
  57. network->forward();
  58. network->wait();
  59. float max = -1.0f;
  60. float sum = 0.0f;
  61. for (size_t i = 0; i < 1000; i++) {
  62. float data = static_cast<float*>(out_data)[i];
  63. sum += data;
  64. if (max < data)
  65. max = data;
  66. }
  67. printf("max=%e, sum=%e\n", max, sum);
  68. return true;
  69. }
  70. } // namespace
  71. REGIST_EXAMPLE("reset_input", reset_input);
  72. REGIST_EXAMPLE("reset_input_output", reset_input_output);
  73. #endif
  74. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}