
network_share_weights.cpp 3.1 kB

/**
 * \file example/cpp_example/network_share_weights.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;

namespace {

bool network_share_same_weights(const Args& args) {
    std::string network_path = args.model_path;
    std::string input_path = args.input_path;

    //! create and load the network
    std::shared_ptr<Network> network = std::make_shared<Network>();
    network->load_model(network_path);

    //! load a new network from the created network and share the same weights
    Config config_new;
    config_new.options.const_shape = true;
    NetworkIO network_io_new;
    std::shared_ptr<Network> weight_shared_network =
            std::make_shared<Network>(config_new, network_io_new);
    Runtime::shared_weight_with_network(weight_shared_network, network);

    //! set input data to input tensor
    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
    void* dst_ptr = input_tensor->get_memory_ptr();
    std::shared_ptr<Tensor> input_tensor2 = weight_shared_network->get_input_tensor(0);
    void* dst_ptr2 = input_tensor2->get_memory_ptr();

    //! copy or forward data to network
    size_t length = input_tensor->get_tensor_total_size_in_byte();
    auto src_tensor = parse_npy(input_path);
    void* src = src_tensor->get_memory_ptr();
    memcpy(dst_ptr, src, length);
    memcpy(dst_ptr2, src, length);

    //! forward
    network->forward();
    network->wait();
    weight_shared_network->forward();
    weight_shared_network->wait();
    //! get the output data or read tensor set in network_in
    std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
    std::shared_ptr<Tensor> output_tensor2 =
            weight_shared_network->get_output_tensor(0);
    void* out_data = output_tensor->get_memory_ptr();
    void* out_data2 = output_tensor2->get_memory_ptr();
    size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
                        output_tensor->get_layout().get_elem_size();
    printf("length=%zu\n", length);
    float max = -1.0f;
    float sum = 0.0f;
    for (size_t i = 0; i < out_length; i++) {
        float data = static_cast<float*>(out_data)[i];
        float data2 = static_cast<float*>(out_data2)[i];
        if (data != data2) {
            printf("the result between the origin network and the weight-shared "
                   "network is different.\n");
        }
        sum += data;
        if (max < data)
            max = data;
    }
    printf("max=%e, sum=%e\n", max, sum);
    return true;
}

}  // namespace

REGIST_EXAMPLE("network_share_same_weights", network_share_same_weights);

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}