You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

user_cryption.cpp 4.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124
  1. /**
  2. * \file example/cpp_example/user_cryption.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "../example.h"
  12. #if LITE_BUILD_WITH_MGE
  13. using namespace lite;
  14. using namespace example;
  15. namespace {
  16. std::vector<uint8_t> decrypt_model(const void* model_mem, size_t size,
  17. const std::vector<uint8_t>& key) {
  18. if (key.size() == 1) {
  19. std::vector<uint8_t> ret(size, 0);
  20. const uint8_t* ptr = static_cast<const uint8_t*>(model_mem);
  21. uint8_t key_data = key[0];
  22. for (size_t i = 0; i < size; i++) {
  23. ret[i] = ptr[i] ^ key_data ^ key_data;
  24. }
  25. return ret;
  26. } else {
  27. printf("the user define decrypt method key length is wrong.\n");
  28. return {};
  29. }
  30. }
  31. } // namespace
  32. bool lite::example::register_cryption_method(const Args& args) {
  33. std::string network_path = args.model_path;
  34. std::string input_path = args.input_path;
  35. //! register the decryption method
  36. register_decryption_and_key("just_for_test", decrypt_model, {15});
  37. lite::Config config;
  38. config.bare_model_cryption_name = "just_for_test";
  39. //! create and load the network
  40. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  41. network->load_model(network_path);
  42. //! set input data to input tensor
  43. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  44. auto layout = input_tensor->get_layout();
  45. auto src_tensor = parse_npy(input_path);
  46. void* src = src_tensor->get_memory_ptr();
  47. input_tensor->reset(src, layout);
  48. //! forward
  49. network->forward();
  50. network->wait();
  51. //! get the output data or read tensor set in network_in
  52. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  53. void* out_data = output_tensor->get_memory_ptr();
  54. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  55. output_tensor->get_layout().get_elem_size();
  56. float max = -1.0f;
  57. float sum = 0.0f;
  58. for (size_t i = 0; i < out_length; i++) {
  59. float data = static_cast<float*>(out_data)[i];
  60. sum += data;
  61. if (max < data)
  62. max = data;
  63. }
  64. printf("max=%e, sum=%e\n", max, sum);
  65. return true;
  66. }
  67. bool lite::example::update_cryption_key(const Args& args) {
  68. std::string network_path = args.model_path;
  69. std::string input_path = args.input_path;
  70. //! update the decryption method key
  71. std::vector<uint8_t> key(32, 0);
  72. for (size_t i = 0; i < 32; i++) {
  73. key[i] = 31 - i;
  74. }
  75. update_decryption_or_key("AES_default", nullptr, key);
  76. lite::Config config;
  77. config.bare_model_cryption_name = "AES_default";
  78. //! create and load the network
  79. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  80. network->load_model(network_path);
  81. //! set input data to input tensor
  82. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  83. auto layout = input_tensor->get_layout();
  84. auto src_tensor = parse_npy(input_path);
  85. void* src = src_tensor->get_memory_ptr();
  86. input_tensor->reset(src, layout);
  87. //! forward
  88. network->forward();
  89. network->wait();
  90. //! get the output data or read tensor set in network_in
  91. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  92. void* out_data = output_tensor->get_memory_ptr();
  93. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  94. output_tensor->get_layout().get_elem_size();
  95. float max = -1.0f;
  96. float sum = 0.0f;
  97. for (size_t i = 0; i < out_length; i++) {
  98. float data = static_cast<float*>(out_data)[i];
  99. sum += data;
  100. if (max < data)
  101. max = data;
  102. }
  103. printf("max=%e, sum=%e\n", max, sum);
  104. return true;
  105. }
  106. #endif
  107. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台