
model_serialize_imp.h 3.3 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_
#define INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "graph/anchor.h"
#include "graph/detail/attributes_holder.h"
#include "graph/ge_tensor.h"
#include "graph/graph.h"
#include "graph/node.h"

namespace ge {
using ComputeGraphPtr = std::shared_ptr<ComputeGraph>;

struct NodeNameGraphReq {
  string node_name;
  int32_t index;
  ComputeGraphPtr graph;
};

struct NodeNameNodeReq {
  string src_node_name;
  int32_t src_out_index;
  NodePtr dst_node;
  int32_t dst_in_index;
  string dst_node_name;
};

class ModelSerializeImp {
 public:
  bool SerializeModel(const Model &model, proto::ModelDef *modeProto, bool is_dump = false);

  bool SerializeGraph(const ConstComputeGraphPtr &graph, proto::GraphDef *graphProto, bool is_dump = false);

  bool SerializeEdge(const NodePtr &node, proto::OpDef *opDefProto);

  bool SerializeOpDesc(const ConstOpDescPtr &node, proto::OpDef *opDefProto, bool is_dump = false);

  bool SerializeNode(const NodePtr &node, proto::OpDef *opDefProto, bool is_dump = false);

  bool SerializeTensor(const ConstGeTensorPtr &tensor, proto::TensorDef *tensorProto);

  bool UnserializeModel(Model &model, proto::ModelDef &modeProto);

  bool UnserializeGraphWithoutEdge(ComputeGraphPtr &graph, proto::GraphDef &graphProto);

  bool UnserializeGraph(ComputeGraphPtr &graph, proto::GraphDef &graphProto);

  bool HandleNodeNameRef();

  bool UnserializeOpDesc(OpDescPtr &opDesc, proto::OpDef &opDefProto);

  void AttrDefToOpDesc(OpDescPtr &op_desc, std::vector<string> &key_in, std::vector<string> &key_out,
                       std::vector<uint32_t> &value_in, std::vector<uint32_t> &value_out, std::vector<string> &opt);

  void OpDescToAttrDef(const ConstOpDescPtr &op_desc, proto::OpDef *op_def_proto);

  bool UnserializeNode(ComputeGraphPtr &graph, proto::OpDef &opDefProto);

  bool UnserializeTensor(GeTensorPtr &tensor, proto::TensorDef &tensorProto);

  bool ParseNodeIndex(const string &node_index, string &nodeName, int32_t &index);

  void SetProtobufOwner(const ProtoMsgOwner &bufferProtobufOnwer) { protobuf_owner_ = bufferProtobufOnwer; }

 private:
  bool RebuildOwnership(ComputeGraphPtr &compute_graph, std::map<std::string, ComputeGraphPtr> &subgraphs);

  std::vector<NodeNameGraphReq> graph_input_node_names_;
  std::vector<NodeNameGraphReq> graph_output_node_names_;
  std::vector<NodeNameNodeReq> node_input_node_names_;
  std::map<string, NodePtr> node_map_;
  ProtoMsgOwner protobuf_owner_;
};
}  // namespace ge

#endif  // INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_
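
For orientation, the following is a minimal sketch of how this interface might be driven to round-trip a Model through its protobuf form, based only on the declarations above. The function name RoundTripExample, the way the input Model is obtained, and the include path "graph/model.h" are illustrative assumptions, not part of this header.

#include "graph/detail/model_serialize_imp.h"
#include "graph/model.h"  // assumed header declaring ge::Model

// Hypothetical round trip: serialize a Model into proto::ModelDef, then restore it.
void RoundTripExample(const ge::Model &model) {
  ge::ModelSerializeImp imp;

  // Serialize the in-memory Model into its protobuf representation.
  ge::proto::ModelDef model_def;
  if (!imp.SerializeModel(model, &model_def)) {
    return;  // serialization failed
  }

  // Rebuild a fresh Model from the protobuf message.
  ge::Model restored;
  (void)imp.UnserializeModel(restored, model_def);
}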

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.