You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

reset_io.cpp 3.3 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103
  1. /**
  2. * \file example/cpp_example/reset_io.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
#include "example.h"

#include <limits>
  12. #if LITE_BUILD_WITH_MGE
  13. using namespace lite;
  14. using namespace example;
  15. namespace {
  16. bool reset_input(const Args& args) {
  17. std::string network_path = args.model_path;
  18. std::string input_path = args.input_path;
  19. lite::Config config;
  20. //! create and load the network
  21. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  22. network->load_model(network_path);
  23. //! set input data to input tensor
  24. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  25. auto layout = input_tensor->get_layout();
  26. auto src_tensor = parse_npy(input_path);
  27. void* src = src_tensor->get_memory_ptr();
  28. input_tensor->reset(src, layout);
  29. //! forward
  30. network->forward();
  31. network->wait();
  32. //! 6. get the output data or read tensor set in network_in
  33. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  34. void* out_data = output_tensor->get_memory_ptr();
  35. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  36. output_tensor->get_layout().get_elem_size();
  37. float max = -1.0f;
  38. float sum = 0.0f;
  39. for (size_t i = 0; i < out_length; i++) {
  40. float data = static_cast<float*>(out_data)[i];
  41. sum += data;
  42. if (max < data)
  43. max = data;
  44. }
  45. printf("max=%e, sum=%e\n", max, sum);
  46. return true;
  47. }
  48. bool reset_input_output(const Args& args) {
  49. std::string network_path = args.model_path;
  50. std::string input_path = args.input_path;
  51. lite::Config config;
  52. //! create and load the network
  53. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  54. network->load_model(network_path);
  55. //! set input data to input tensor
  56. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  57. auto layout = input_tensor->get_layout();
  58. auto src_tensor = parse_npy(input_path);
  59. void* src = src_tensor->get_memory_ptr();
  60. input_tensor->reset(src, layout);
  61. //! set output ptr to store the network output
  62. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  63. auto result_tensor = std::make_shared<Tensor>(
  64. LiteDeviceType::LITE_CPU, Layout{{1, 1000}, 2, LiteDataType::LITE_FLOAT});
  65. void* out_data = result_tensor->get_memory_ptr();
  66. output_tensor->reset(out_data, result_tensor->get_layout());
  67. network->forward();
  68. network->wait();
  69. float max = -1.0f;
  70. float sum = 0.0f;
  71. for (size_t i = 0; i < 1000; i++) {
  72. float data = static_cast<float*>(out_data)[i];
  73. sum += data;
  74. if (max < data)
  75. max = data;
  76. }
  77. printf("max=%e, sum=%e\n", max, sum);
  78. return true;
  79. }
  80. } // namespace
//! expose both demos under their CLI names; REGIST_EXAMPLE comes from
//! example.h — presumably it maps the string name to the function for the
//! example runner (verify against example.h)
REGIST_EXAMPLE("reset_input", reset_input);
REGIST_EXAMPLE("reset_input_output", reset_input_output);
  83. #endif
  84. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}