You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they may include dashes ('-') and can be up to 35 characters long.

user_cryption.cpp 4.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122
  1. /**
  2. * \file example/user_cryption.cpp
  3. *
  4. * This file is part of MegEngine, a deep learning framework developed by
  5. * Megvii.
  6. *
  7. * \copyright Copyright (c) 2020-2021 Megvii Inc. All rights reserved.
  8. */
#include "../example.h"

#include <limits>
  10. #if LITE_BUILD_WITH_MGE
  11. using namespace lite;
  12. using namespace example;
  13. namespace {
  14. std::vector<uint8_t> decrypt_model(const void* model_mem, size_t size,
  15. const std::vector<uint8_t>& key) {
  16. if (key.size() == 1) {
  17. std::vector<uint8_t> ret(size, 0);
  18. const uint8_t* ptr = static_cast<const uint8_t*>(model_mem);
  19. uint8_t key_data = key[0];
  20. for (size_t i = 0; i < size; i++) {
  21. ret[i] = ptr[i] ^ key_data ^ key_data;
  22. }
  23. return ret;
  24. } else {
  25. printf("the user define decrypt method key length is wrong.\n");
  26. return {};
  27. }
  28. }
  29. } // namespace
  30. bool lite::example::register_cryption_method(const Args& args) {
  31. std::string network_path = args.model_path;
  32. std::string input_path = args.input_path;
  33. //! register the decryption method
  34. register_decryption_and_key("just_for_test", decrypt_model, {15});
  35. lite::Config config;
  36. config.bare_model_cryption_name = "just_for_test";
  37. //! create and load the network
  38. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  39. network->load_model(network_path);
  40. //! set input data to input tensor
  41. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  42. auto layout = input_tensor->get_layout();
  43. auto src_tensor = parse_npy(input_path);
  44. void* src = src_tensor->get_memory_ptr();
  45. input_tensor->reset(src, layout);
  46. //! forward
  47. network->forward();
  48. network->wait();
  49. //! get the output data or read tensor set in network_in
  50. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  51. void* out_data = output_tensor->get_memory_ptr();
  52. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  53. output_tensor->get_layout().get_elem_size();
  54. float max = -1.0f;
  55. float sum = 0.0f;
  56. for (size_t i = 0; i < out_length; i++) {
  57. float data = static_cast<float*>(out_data)[i];
  58. sum += data;
  59. if (max < data)
  60. max = data;
  61. }
  62. printf("max=%e, sum=%e\n", max, sum);
  63. return true;
  64. }
  65. bool lite::example::update_cryption_key(const Args& args) {
  66. std::string network_path = args.model_path;
  67. std::string input_path = args.input_path;
  68. //! update the decryption method key
  69. std::vector<uint8_t> key(32, 0);
  70. for (size_t i = 0; i < 32; i++) {
  71. key[i] = 31 - i;
  72. }
  73. update_decryption_or_key("AES_default", nullptr, key);
  74. lite::Config config;
  75. config.bare_model_cryption_name = "AES_default";
  76. //! create and load the network
  77. std::shared_ptr<Network> network = std::make_shared<Network>(config);
  78. network->load_model(network_path);
  79. //! set input data to input tensor
  80. std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
  81. auto layout = input_tensor->get_layout();
  82. auto src_tensor = parse_npy(input_path);
  83. void* src = src_tensor->get_memory_ptr();
  84. input_tensor->reset(src, layout);
  85. //! forward
  86. network->forward();
  87. network->wait();
  88. //! get the output data or read tensor set in network_in
  89. std::shared_ptr<Tensor> output_tensor = network->get_output_tensor(0);
  90. void* out_data = output_tensor->get_memory_ptr();
  91. size_t out_length = output_tensor->get_tensor_total_size_in_byte() /
  92. output_tensor->get_layout().get_elem_size();
  93. float max = -1.0f;
  94. float sum = 0.0f;
  95. for (size_t i = 0; i < out_length; i++) {
  96. float data = static_cast<float*>(out_data)[i];
  97. sum += data;
  98. if (max < data)
  99. max = data;
  100. }
  101. printf("max=%e, sum=%e\n", max, sum);
  102. return true;
  103. }
  104. #endif
  105. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台