
model_builder.cc 36 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/build/model_builder.h"
#include <securectype.h>
#include <iostream>
#include <set>
#include <unordered_map>
#include "common/ge/ge_util.h"
#include "framework/common/debug/ge_log.h"
#include "graph/anchor.h"
#include "graph/attr_value.h"
#include "graph/buffer.h"
#include "graph/build/stream_allocator.h"
#include "graph/common/omg_util.h"
#include "graph/common/ge_call_wrapper.h"
#include "graph/common/local_context.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_attr_value.h"
#include "graph/ge_context.h"
#include "graph/ge_error_codes.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/optimize/common/params.h"
#include "graph/types.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/type_utils.h"
#include "init/gelib.h"
#include "memory/memory_assigner.h"
#include "omg/version.h"
#include "register/op_registry.h"
#include "graph/passes/set_input_output_offset_pass.h"

using std::map;
using std::set;
using std::string;
using std::vector;

namespace {
const uint32_t kWeightsStartOffset = 512;
const int32_t kWrongIndex = -2;
const float kImgRatioYUV420SP_U8 = 1.5;
const int kImgRatioRGB888_U8 = 3;
const int kImgRatioNC1HWC0DI_FP16 = 12;
const int kInvalidIndexNum = -1;
const uint32_t kInputDimensions2D = 2;
const uint32_t kInputDimensions3D = 3;
const char *const kVectorCore = "VectorCore";
const char *const kCoreType = "ge.engineType";
const std::string kEnableL1Fusion = "ge.l1Fusion";
const set<string> adjust_layer_type_ = {ge::CONVOLUTION};

bool IsGeLocalOp(const ge::ConstOpDescPtr &op_desc) {
  auto type = op_desc->GetType();
  if (type == ge::CONSTANTOP) {
    // constant op just has one output
    ge::GeTensorDesc output_desc = op_desc->GetOutputDesc(0);
    return !(output_desc.GetDataType() == ge::DT_STRING);
  }
  const set<string> ge_local_set = {ge::STREAMMERGE, ge::MEMCPYASYNC, ge::STREAMACTIVE, ge::STREAMSWITCH,
                                    ge::VARIABLE, ge::NOOP, ge::CONSTANT, ge::ENTER,
                                    ge::REFENTER, ge::LOOPCOND, ge::NEXTITERATION, ge::REFNEXTITERATION,
                                    ge::EXIT, ge::REFEXIT, ge::MERGE, ge::MEMCPYADDRASYNC};
  return (ge_local_set.find(type) != ge_local_set.end());
}
}  // namespace

namespace ge {
ModelBuilder::ModelBuilder(uint64_t session_id, ge::ComputeGraphPtr compute_graph,
                           const Graph2SubGraphInfoList &subgraphs, const map<string, int> &stream_max_parallel_num,
                           bool hcom_parallel, int mode)
    : session_id_(session_id),
      weight_offset_(kWeightsStartOffset),
      compute_graph_(std::move(compute_graph)),
      subgraphs_(subgraphs),
      stream_num_(0),
      event_num_(0),
      label_num_(0),
      stream_max_parallel_num_(stream_max_parallel_num),
      hcom_parallel_(hcom_parallel),
      build_mode_(mode),
      max_mem_offset_(0),
      p2p_mem_offset_(0),
      zero_copy_mem_size_(0),
      platform_type_(0),
      is_loop_graph_(false),
      is_l1_fusion_enable_(false) {}

ModelBuilder::~ModelBuilder() {}

Status ModelBuilder::CalcOutputSize(const ge::NodePtr &n) {
  GE_CHECK_NOTNULL(n);
  auto node_op_desc = n->GetOpDesc();
  GE_CHECK_NOTNULL(node_op_desc);
  uint32_t index = 0;
  for (const auto &output_desc_ptr : node_op_desc->GetAllOutputsDescPtr()) {
    GeTensorDesc &desc_temp = *output_desc_ptr;
    uint32_t dim_num = static_cast<uint32_t>(desc_temp.GetShape().GetDimNum());
    GE_IF_BOOL_EXEC(dim_num > DIM_DEFAULT_SIZE, TensorUtils::SetRealDimCnt(desc_temp, dim_num));
    // calculate tensor size
    int64_t size_temp = 0;
    graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(desc_temp, size_temp);
    if (graph_status != GRAPH_SUCCESS) {
      GELOGE(graph_status, "GetTensorMemorySizeInBytes failed!");
      return FAILED;
    }
    TensorUtils::SetSize(desc_temp, size_temp);
    if (node_op_desc->UpdateOutputDesc(index, desc_temp) != SUCCESS) {
      GELOGE(FAILED, "UpdateOutputDesc failed.");
      return FAILED;
    }
    GELOGD("update output desc, dim_size: %u, mem_size: %ld, format: %s, type: %s, node name:%s", dim_num, size_temp,
           TypeUtils::FormatToSerialString(desc_temp.GetFormat()).c_str(),
           TypeUtils::DataTypeToSerialString(desc_temp.GetDataType()).c_str(), node_op_desc->GetName().c_str());
    index++;
  }
  return SUCCESS;
}

bool ModelBuilder::SetInputConst(const OpDescPtr &op_desc, const NodePtr &src_node, size_t index,
                                 vector<bool> &is_input_const) {
  GELOGI("SetIsInputConst const: %s, source node: %s", op_desc->GetName().c_str(), src_node->GetName().c_str());
  for (size_t i = is_input_const.size(); i <= index; ++i) {
    is_input_const.push_back(false);
  }
  is_input_const[index] = true;
  vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(src_node);
  if (weights.empty()) {
    GELOGW("SetInputIsConst weights is empty, node: %s", src_node->GetName().c_str());
    return false;
  }
  GeTensorPtr weight = weights[0];
  GE_IF_BOOL_EXEC(weight == nullptr, return true);
  GeTensorDesc &tensor_desc = weight->MutableTensorDesc();
  int64_t data_offset = 0;
  if (TensorUtils::GetDataOffset(tensor_desc, data_offset) != GRAPH_SUCCESS) {
    GELOGW("Get Offset from weight failed");
    return false;
  }
  auto input_tensor = op_desc->MutableInputDesc(static_cast<uint32_t>(index));
  if (input_tensor == nullptr) {
    GELOGW("Get input_tensor failed");
    return false;
  }
  TensorUtils::SetDataOffset(*input_tensor, data_offset);
  return true;
}

void ModelBuilder::SetInputIsConst(const ge::NodePtr &n) {
  auto node_op_desc = n->GetOpDesc();
  GE_CHECK_NOTNULL_JUST_RETURN(node_op_desc);
  auto is_input_const = node_op_desc->GetIsInputConst();
  // must set all true input_const to false
  for (size_t i = 0; i < is_input_const.size(); i++) {
    is_input_const[i] = false;
  }
  std::string const_type;
  auto in_data_anchors = n->GetAllInDataAnchors();
  for (size_t index = 0; index < in_data_anchors.size(); index++) {
    auto in_data_anchor = in_data_anchors.at(index);
    const auto &peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
    GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
    const auto &src_node = peer_out_anchor->GetOwnerNode();
    if (!NodeUtils::GetConstOpType(src_node, const_type)) {
      continue;
    }
    if (const_type == CONSTANT) {
      if (!SetInputConst(node_op_desc, src_node, index, is_input_const)) {
        return;
      }
    } else {
      if ((index < is_input_const.size()) && is_input_const[index]) {
        is_input_const[index] = false;
      }
    }
  }
  std::string input_const_info = ToString(is_input_const);
  GELOGD("update opdesc:%s InputConst:%s", node_op_desc->GetName().c_str(), input_const_info.c_str());
  node_op_desc->SetIsInputConst(is_input_const);
}

Status ModelBuilder::AdjustConstWeightSize(const ge::NodePtr &node, size_t &mem_offset) {
  GE_CHECK_NOTNULL(node);
  if (node->GetType() == CONSTANT) {
    vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(node);
    if (weights.empty()) {
      GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
      return FAILED;
    }
    GeTensorPtr weight = weights[0];
    if (weight == nullptr) {
      GELOGE(FAILED, "weights[0] is null.");
      return FAILED;
    }
    GeTensorDesc &tensor_desc = weight->MutableTensorDesc();
    size_t output_size = weight->GetData().size();
    TensorUtils::SetDataOffset(tensor_desc, mem_offset);
    GELOGD("Node: %s, weight size: %zu.", node->GetName().c_str(), output_size);
    mem_offset += output_size;
  }
  return SUCCESS;
}

Status ModelBuilder::SetInputOutputDesc() {
  Status ret;
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
    if (!is_loop_graph_ && node_op_desc->GetType() == LOOPCOND) {
      is_loop_graph_ = true;
    }
    // if user set input node format ND, the expected node for data and netoutput format is ND in
    // final graph.
    if ((GetLocalOmgContext().format == domi::DOMI_TENSOR_ND) && (!node_op_desc->HasAttr("_is_single_op")) &&
        ((node_op_desc->GetType() == DATA_TYPE) || (node_op_desc->GetType() == NETOUTPUT))) {
      auto inputDescsPtr = node_op_desc->GetAllInputsDescPtr();
      auto outputDescsPtr = node_op_desc->GetAllOutputsDescPtr();
      ge::Format format = ge::FORMAT_ND;
      for (auto &inputDescPtr : inputDescsPtr) {
        GE_CHECK_NOTNULL(inputDescPtr);
        inputDescPtr->SetFormat(format);
        inputDescPtr->SetOriginFormat(format);
      }
      for (auto &outputDescPtr : outputDescsPtr) {
        GE_CHECK_NOTNULL(outputDescPtr);
        outputDescPtr->SetFormat(format);
        outputDescPtr->SetOriginFormat(format);
      }
    }
    if (node_op_desc->GetType() == DATA_TYPE || node_op_desc->GetType() == AIPP_DATA_TYPE) {
      GELOGD("Data node: %s.", n->GetName().c_str());
      continue;
    }
    GE_IF_BOOL_EXEC(n->GetInAllNodes().empty() && n->GetOutAllNodes().empty(), continue;);
    SetInputIsConst(n);
    if (IsGeLocalOp(n->GetOpDesc())) {
      GE_CHK_STATUS_RET(CalcOutputSize(n), "Calculate output size failed");
    }
    ret = AdjustConstWeightSize(n, weight_offset_);
    GE_CHK_STATUS_RET(ret, "AdjustConstWeightSize failed");
    GE_IF_BOOL_EXEC(((weight_offset_ > 0) && (weight_offset_ % MEM_ALIGN_SIZE != 0)),
                    weight_offset_ = (weight_offset_ + MEM_ALIGN_SIZE - 1) / MEM_ALIGN_SIZE * MEM_ALIGN_SIZE);
  }
  GE_CHK_STATUS_RET(compute_graph_->TopologicalSorting(), "TopologicalSorting failed");
  return SUCCESS;
}

void ModelBuilder::AddNodeInputProperty() {
  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return);
    vector<string> src_name_list;
    vector<int64_t> src_index_list;
    for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
      auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
      GE_IF_BOOL_EXEC(node_op_desc->HasAttr(MERGE_PRENODE_FLAG), continue);
      ge::NodePtr src_node = peer_out_anchor->GetOwnerNode();
      src_name_list.emplace_back(src_node->GetName());
      src_index_list.emplace_back(peer_out_anchor->GetIdx());
    }
    auto in_control_anchor = node->GetInControlAnchor();
    if (in_control_anchor != nullptr) {
      string src_name_temp;
      for (const auto &out_control_anchor : in_control_anchor->GetPeerOutControlAnchors()) {
        ge::NodePtr src_node = out_control_anchor->GetOwnerNode();
        src_name_temp = src_name_temp.empty() ? src_node->GetName() : src_name_temp + ":" + src_node->GetName();
      }
      GE_IF_BOOL_EXEC(!src_name_temp.empty(), src_name_list.emplace_back(src_name_temp);)
    }
    node_op_desc->SetSrcName(src_name_list);
    node_op_desc->SetSrcIndex(src_index_list);
  }

  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return);
    GE_IF_BOOL_EXEC(node_op_desc->GetType() == NETOUTPUT, continue);
    auto out_control_anchor = node->GetOutControlAnchor();
    GE_IF_BOOL_EXEC(out_control_anchor == nullptr, GELOGW("out_control_anchor is nullptr"); return);
    vector<string> dst_name_list;
    vector<int64_t> dst_index_list;
    string dst_name_temp;
    for (const auto &in_control_anchor : out_control_anchor->GetPeerInControlAnchors()) {
      ge::NodePtr dst_node = in_control_anchor->GetOwnerNode();
      dst_name_temp = dst_name_temp.empty() ? dst_node->GetName() : dst_name_temp + ":" + dst_node->GetName();
    }
    GE_IF_BOOL_EXEC(!dst_name_temp.empty(), dst_name_list.emplace_back(dst_name_temp));
    GE_IF_BOOL_EXEC(!out_control_anchor->GetPeerInControlAnchors().empty(),
                    dst_index_list.emplace_back(kInvalidIndexNum));
    for (const auto &out_data_anchor : node->GetAllOutDataAnchors()) {
      GE_IF_BOOL_EXEC(node_op_desc->HasAttr(MERGE_PRENODE_FLAG), break);
      dst_name_temp = "";
      int64_t dst_index = kWrongIndex;  // assign an impossible value to dst_index.
      for (const auto &in_data_anchor : out_data_anchor->GetPeerInDataAnchors()) {
        GE_IF_BOOL_EXEC(in_data_anchor == nullptr, GELOGW("in_data_anchor is nullptr"); return);
        ge::NodePtr dst_node = in_data_anchor->GetOwnerNode();
        dst_name_temp = dst_name_temp.empty() ? dst_node->GetName() : dst_name_temp + ":" + dst_node->GetName();
        dst_index = in_data_anchor->GetIdx();
      }
      GE_IF_BOOL_EXEC(dst_index != kWrongIndex, dst_index_list.emplace_back(dst_index));  // not found
      GE_IF_BOOL_EXEC(!dst_name_temp.empty(), dst_name_list.emplace_back(dst_name_temp));
    }
    node_op_desc->SetDstName(dst_name_list);
    node_op_desc->SetDstIndex(dst_index_list);
  }
}

Status ModelBuilder::AdjustInputTensorFlag() {
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    if ((n->GetType() == DATA_TYPE) || (n->GetType() == AIPP_DATA_TYPE)) {
      GELOGD("Data node: %s.", n->GetName().c_str());
      for (const auto &anchor : n->GetAllOutDataAnchors()) {
        for (const auto &in_anchors : anchor->GetPeerInDataAnchors()) {
          GE_IF_BOOL_EXEC(in_anchors == nullptr, continue);
          auto owner_node = in_anchors->GetOwnerNode();
          auto owner_node_op_desc = owner_node->GetOpDesc();
          GE_IF_BOOL_EXEC(owner_node_op_desc == nullptr, continue);
          auto input_desc = owner_node_op_desc->GetInputDesc(in_anchors->GetIdx());
          ge::TensorUtils::SetInputTensor(input_desc, true);
          if (owner_node_op_desc->UpdateInputDesc(in_anchors->GetIdx(), input_desc) != SUCCESS) {
            GELOGE(FAILED, "UpdateOutputDesc failed.");
            return FAILED;
          }
        }
      }
    }
  }
  return SUCCESS;
}

void ModelBuilder::InitL1FusionOption() {
  string buffer_optimize = "off_optimize";
  graphStatus ret = ge::GetContext().GetOption(BUFFER_OPTIMIZE, buffer_optimize);
  if (ret == GRAPH_SUCCESS) {
    is_l1_fusion_enable_ = (buffer_optimize == "l1_optimize");
    GELOGD("The value of %s is %s.", BUFFER_OPTIMIZE.c_str(), buffer_optimize.c_str());
  } else {
    GELOGW("The value of %s is empty.", kEnableL1Fusion.c_str());
  }
}

Status ModelBuilder::BuildModelDef(ge::Model &model) {
  ClearOriginalFormat();
  max_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_HBM];
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, max_mem_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_MEMORY_SIZE failed.");
                   return FAILED);
  if (mem_type_to_mem_offset_.find(RT_MEMORY_P2P_DDR) != mem_type_to_mem_offset_.end()) {
    p2p_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_P2P_DDR];
  }
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_mem_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_P2P_MEMORY_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, weight_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_WEIGHT_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_STREAM_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, event_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_EVENT_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(&model, ATTR_MODEL_HUGE_STREAM_LIST, huge_streams_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_HUGE_STREAM_LIST failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_LABEL_NUM, label_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_LABEL_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_ZERO_COPY_MEMORY_SIZE, zero_copy_mem_size_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_ZERO_COPY_MEMORY_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, ATTR_MODEL_OUT_NODES_NAME, GetLocalOmgContext().net_out_nodes),
                   GELOGE(FAILED, "SetListStr of ATTR_MODEL_OUT_NODES_NAME failed.");
                   return FAILED);
  GELOGI("For model, max_mem_offset_: %zu, p2p_mem_size: %zu, zero_copy_mem_size_: %zu", max_mem_offset_,
         p2p_mem_offset_, zero_copy_mem_size_);
  string fp_ceiling_mode;
  if (ge::GetContext().GetOption("ge.fpCeilingMode", fp_ceiling_mode) == SUCCESS) {
    if (!ge::AttrUtils::SetStr(&model, ATTR_FP_CEILING_MODE, fp_ceiling_mode)) {
      GELOGE(FAILED, "Failed to set attr ATTR_FP_CEILING_MODE");
      return FAILED;
    }
    GELOGI("Set attr ATTR_FP_CEILING_MODE to model, value is %s.", fp_ceiling_mode.c_str());
  }
  string ge_core_type;
  Status ret = ge::GetContext().GetOption(kCoreType, ge_core_type);
  if (ret != SUCCESS) {
    GELOGW("get the option CORE_TYPE fail, set it to default value VECTOR_ENGINE");
  }
  int64_t core_type = (ge_core_type == kVectorCore) ? 1 : 0;
  GELOGI("core_type: %ld", core_type);
  if (!ge::AttrUtils::SetInt(&model, ATTR_MODEL_CORE_TYPE, core_type)) {
    GELOGE(FAILED, "SetInt of ATTR_CORE_TYPE failed.");
  }
  InitL1FusionOption();
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(&model, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_),
                   GELOGE(FAILED, "SetBool of ATTR_NAME_SWITCH_FOR_L1_FUSION failed.");
                   return FAILED);
  const DumpProperties &dump_properties = PropertiesManager::Instance().GetDumpProperties(session_id_);
  bool is_op_debug = dump_properties.IsOpDebugOpen();
  if (is_op_debug) {
    if (!ge::AttrUtils::SetBool(&model, ATTR_OP_DEBUG_FLAG, is_op_debug)) {
      GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_FLAG failed.");
      return FAILED;
    }
    uint32_t op_debug_mode = dump_properties.GetOpDebugMode();
    GELOGI("Get op debug mode:%d", op_debug_mode);
    if (!ge::AttrUtils::SetInt(&model, ATTR_OP_DEBUG_MODE, op_debug_mode)) {
      GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_MODE failed.");
      return FAILED;
    }
  }
  model.SetName(compute_graph_->GetName());
  model.SetGraph(ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph_));
  GELOGI("weight_offset_: %zu", weight_offset_);
  GELOGI("Set event num: %ld.", event_num_);
  if (Params::Instance() == nullptr) {
    return FAILED;
  }
  platform_type_ = Params::Instance()->GetTarget_8bit();
  return SUCCESS;
}

void ModelBuilder::ClearOriginalFormat() {
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    if (node_op_desc != nullptr) {
      if (node_op_desc->HasAttr(ATTR_NAME_FORMAT)) {
        if (node_op_desc->DelAttr(ATTR_NAME_FORMAT) != SUCCESS) {
          GELOGW("DelAttr ATTR_NAME_FORMAT failed.");
        }
      }
      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_INFERRED_FORMAT),
          if (node_op_desc->DelAttr(ATTR_NAME_INFERRED_FORMAT) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_INFERRED_FORMAT failed.");
          });
      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_PRED_PERMUTE_DELETED),
          if (node_op_desc->DelAttr(ATTR_NAME_PRED_PERMUTE_DELETED) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_PRED_PERMUTE_DELETED failed.");
          });
      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_IGNORE_PRED_FORMAT),
          if (node_op_desc->DelAttr(ATTR_NAME_IGNORE_PRED_FORMAT) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_IGNORE_PRED_FORMAT failed.");
          });
    }
  }
}

Status ModelBuilder::MergeWeights() {
  if (weight_offset_ == 0) {
    return SUCCESS;
  }
  ge::Buffer buffer(weight_offset_);
  weight_buffer_ = buffer;
  auto base_addr = weight_buffer_.GetData();
  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(op_desc == nullptr, continue);
    if (node->GetType() != CONSTANT) {
      continue;
    }
    // Get const op weight pointer
    ge::GeTensorPtr weight = nullptr;
    // If MutableTensor failed, weight is nullptr.
    (void)ge::AttrUtils::MutableTensor(op_desc, ATTR_NAME_WEIGHTS, weight);
    if (weight == nullptr) {
      GELOGE(FAILED, "Can't get const op weight, name: %s", node->GetName().c_str());
      return FAILED;
    }
    // Get const op weight offset
    int64_t offset = 0;
    if (ge::TensorUtils::GetDataOffset(weight->GetTensorDesc(), offset) != SUCCESS) {
      GELOGW("Can't get const op offset, name: %s", node->GetName().c_str());
      continue;  // continue to merge if can not get offset
    }
    // Get const op weight data
    auto weight_data = weight->MutableData();
    // copy const op weight data to buffer
    GELOGI("Move to buffer, name: %s offset: %ld size: %zu", node->GetName().c_str(), offset, weight_data.size());
    ge::TensorUtils::SetWeightSize(weight->MutableTensorDesc(), static_cast<uint32_t>(weight_data.size()));
    if ((offset == 0) || (weight_data.size() == 0)) {
      GELOGI("Size or offset is 0. size: %lu offset: %ld", weight_data.size(), offset);
      continue;
    }
    if (weight_data.data() != nullptr) {
      GE_IF_BOOL_EXEC(base_addr == nullptr, GELOGE(FAILED, "Base addr is nullptr."); return FAILED);
      if (weight_offset_ - offset < weight_data.size()) {
        GELOGE(FAILED, "left weight size not enough. left_size:%lu, weight_size:%lu",
               weight_offset_ - offset, weight_data.size());
        return FAILED;
      }
      uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(base_addr) + offset;
      uintptr_t src_ptr = reinterpret_cast<uintptr_t>(weight_data.data());
      size_t left_size = weight_data.size();
      while (left_size > SECUREC_MEM_MAX_LEN) {
        auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), SECUREC_MEM_MAX_LEN, reinterpret_cast<void *>(src_ptr),
                            SECUREC_MEM_MAX_LEN);
        if (err != EOK) {
          GELOGE(FAILED, "mem copy failed. errret:%u, "
                 "dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
                 err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
          return FAILED;
        }
        left_size -= SECUREC_MEM_MAX_LEN;
        dst_ptr = dst_ptr + SECUREC_MEM_MAX_LEN;
        src_ptr = src_ptr + SECUREC_MEM_MAX_LEN;
      }
      auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), left_size, reinterpret_cast<void *>(src_ptr), left_size);
      if (err != EOK) {
        GELOGE(FAILED, "mem copy failed. errret:%u, "
               "dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
               err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
        return FAILED;
      }
    }
    weight_data.clear();
  }
  return SUCCESS;
}

Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
  // Add weight
  ge_model.SetWeight(weight_buffer_);
  // Add TBE Kernels and custom aicpu op bin
  std::set<std::string> tbe_name_set;
  std::set<std::string> aicpu_name_set;
  std::set<std::string> aicpu_op_types;
  std::set<std::string> aicpu_tf_op_types;
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
    // check aicpu op type
    CollectCheckAicpuAttr(node_op_desc, aicpu_op_types, aicpu_tf_op_types);
    TBEKernelPtr tbe_kernel = node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
    if (tbe_kernel == nullptr) {
      std::string kernel_name;
      GeAttrValue::BYTES kernel_buffer;
      (void) AttrUtils::GetStr(node_op_desc, ATTR_NAME_TBE_KERNEL_NAME, kernel_name);
      (void) AttrUtils::GetBytes(node_op_desc, ATTR_NAME_TBE_KERNEL_BUFFER, kernel_buffer);
      if (!kernel_name.empty() && (kernel_buffer.GetSize() > 0)) {
        GE_CHECK_NOTNULL(kernel_buffer.GetData());
        std::vector<char> data(kernel_buffer.GetData(), kernel_buffer.GetData() + kernel_buffer.GetSize());
        tbe_kernel = std::make_shared<OpKernelBin>(kernel_name, std::move(data));
      }
    }
    GE_IF_BOOL_EXEC(tbe_kernel == nullptr, continue);
    if (tbe_name_set.count(tbe_kernel->GetName()) > 0) {
      GELOGE(FAILED, "tbe_kernel name %s can't be the same", tbe_kernel->GetName().c_str());
      return FAILED;
    }
    tbe_name_set.insert(tbe_kernel->GetName());
    tbe_kernel_store_.AddTBEKernel(tbe_kernel);
  }
  SetModelCheckAicpuAttr(model, aicpu_op_types, aicpu_tf_op_types);
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
    CustAICPUKernelPtr cust_aicpu_kernel =
        node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_CUSTAICPU_KERNEL, CustAICPUKernelPtr());
    GE_IF_BOOL_EXEC(cust_aicpu_kernel == nullptr, continue);
    if (aicpu_name_set.count(cust_aicpu_kernel->GetName()) > 0) {
      GELOGE(FAILED, "aicpu_kernel name %s can't be the same", cust_aicpu_kernel->GetName().c_str());
      return FAILED;
    }
    aicpu_name_set.insert(cust_aicpu_kernel->GetName());
    cust_aicpu_kernel_store_.AddCustAICPUKernel(cust_aicpu_kernel);
    GELOGI("Add cust aicpu kernel bin %s", cust_aicpu_kernel->GetName().c_str());
  }
  if (!tbe_kernel_store_.Build()) {
    GELOGE(FAILED, "TBE Kernels store build failed!");
    return FAILED;
  }
  if (!cust_aicpu_kernel_store_.Build()) {
    GELOGE(FAILED, "custom AICPU kernels store build failed!");
    return FAILED;
  }
  ge_model.SetTBEKernelStore(tbe_kernel_store_);
  ge_model.SetCustAICPUKernelStore(cust_aicpu_kernel_store_);
  // Add task
  GeAttrValue::BYTES task_def_bytes;
  if (!AttrUtils::GetZeroCopyBytes(model, MODEL_ATTR_TASKS, task_def_bytes)) {
    GELOGE(INTERNAL_ERROR, "Get zero copy bytes fail.");
    return INTERNAL_ERROR;
  }
  int byte_size = static_cast<int>(task_def_bytes.GetSize());
  std::shared_ptr<domi::ModelTaskDef> task = ge::MakeShared<domi::ModelTaskDef>();
  GE_CHECK_NOTNULL(task);
  GE_CHK_BOOL_EXEC(ReadProtoFromArray(task_def_bytes.GetData(), byte_size, task.get()), return INTERNAL_ERROR,
                   "ReadProtoFromArray failed.");
  ge_model.SetModelTaskDef(task);
  // Add graph
  ge_model.SetName(model.GetName());
  ge_model.SetGraph(model.GetGraph());
  ge_model.SetVersion(model.GetVersion());
  ge_model.SetPlatformVersion(model.GetPlatformVersion());
  ge_model.SetPlatformType(platform_type_);
  ge_model.SetAttr(model.MutableAttrMap());
  return SUCCESS;
}

void ModelBuilder::SetModelVersion(ge::Model &model) {
  // set framework_version TO model
  string framework_version;
  uint32_t counter = 0;
  Status frame_rt = PlatformVersionManager::GetPlatformVersion(framework_version);
  GE_IF_BOOL_EXEC((frame_rt == SUCCESS),
                  string model_framework_version = framework_version + "." + std::to_string(counter);
                  model.SetPlatformVersion(model_framework_version););
  // set IR Version TO model
  model.SetVersion(static_cast<uint32_t>(OM_PROTO_VERSION));
}

Status ModelBuilder::PreBuildModel() {
  if ((compute_graph_ == nullptr) || !(compute_graph_->IsValid())) {
    GELOGE(FAILED, "Graph_ is not valid.");
    return FAILED;
  }
  GE_CHK_STATUS_RET(SetInputOutputDesc(), "SetInputOutputDesc Failed!");
  AddNodeInputProperty();
  return SUCCESS;
}

Status ModelBuilder::BuildModelForGetTask(ge::Model &model) {
  GE_CHK_STATUS_RET(AdjustInputTensorFlag(), "AdjustInputTensorFlag failed!");
  // Assign logical streams.
  StreamAllocator stream_allocator(compute_graph_, subgraphs_);
  GE_TIMESTAMP_START(AssignLogicalStreams);
  GE_CHK_STATUS_RET(stream_allocator.AssignLogicalStreams(stream_max_parallel_num_, hcom_parallel_),
                    "Assign logical streams failed.");
  GE_TIMESTAMP_END(AssignLogicalStreams, "GraphBuilder::AssignLogicalStreams");
  // Assign functional op labels.
  auto root_graph = GraphUtils::FindRootGraph(compute_graph_);
  (void)AttrUtils::GetInt(*root_graph, ATTR_MODEL_LABEL_NUM, label_num_);
  GE_TIMESTAMP_START(AssignMemory);
  MemoryAssigner mem_assigner(compute_graph_);
  GE_CHK_STATUS_RET(mem_assigner.AssignMemory(is_loop_graph_, mem_type_to_mem_offset_, zero_copy_mem_size_),
                    "Assign Memory Failed!");
  GE_TIMESTAMP_END(AssignMemory, "GraphBuilder::AssignMemory");
  GE_TIMESTAMP_START(SetInputOutputOffset);
  SetInputOutputOffsetPass input_output_offset;
  GE_CHK_STATUS_RET(input_output_offset.Run(compute_graph_), "Set input output offset failed.");
  GE_TIMESTAMP_END(SetInputOutputOffset, "SetInputOutputOffsetPass::Run.");
  // Compile single op in graph build stage
  GE_TIMESTAMP_START(CompileSingleOp);
  GE_CHK_STATUS_RET(CompileSingleOp(), "ATC builder CompileSingleOp() return fail.");
  GE_TIMESTAMP_EVENT_END(CompileSingleOp, "GraphBuilder::CompileSingleOp");
  // Refresh real streams and insert event nodes.
  GE_TIMESTAMP_START(RefreshRealStream);
  GE_CHK_STATUS_RET(stream_allocator.RefreshRealStream(stream_num_, event_num_), "RefreshRealStream failed.");
  huge_streams_ = stream_allocator.GetHugeStreams();
  GE_TIMESTAMP_END(RefreshRealStream, "GraphBuilder::RefreshRealStream");
  GE_TIMESTAMP_START(MergeWeights);
  GE_CHK_STATUS_RET(MergeWeights(), "MergeWeights Failed!");
  GE_TIMESTAMP_END(MergeWeights, "GraphBuilder::MergeWeights");
  GE_TIMESTAMP_START(BuildModelDef);
  GE_CHK_STATUS_RET(BuildModelDef(model), "BuildModelDef failed!");
  GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");
  SetModelVersion(model);
  return SUCCESS;
}

Status ModelBuilder::BuildModelForGetDynShapeTask(ge::Model &model_def) {
  GE_TIMESTAMP_START(BuildModelDef);
  GE_CHK_STATUS_RET(BuildModelDef(model_def), "BuildModelDef failed!");
  GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");
  SetModelVersion(model_def);
  return SUCCESS;
}

ge::Buffer ModelBuilder::GetWeightBuffer() const { return weight_buffer_; }

Status ModelBuilder::CompileSingleOp() {
  GELOGD("Begin to compile single op.");
  // Create ge instance
  std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  if ((instance == nullptr) || !instance->InitFlag()) {
    GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "CompileSingleOp failed.");
    return ge::GE_CLI_GE_NOT_INITIALIZED;
  }
  GE_TIMESTAMP_CALLNUM_START(BatchCompileOp);
  std::unordered_map<string, vector<ge::NodePtr>> node_vector_map;
  for (auto &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto op_desc = node->GetOpDesc();
    if (op_desc == nullptr) {
      continue;
    }
    // Graph build stage only supports the individual compilation of atomic clean operator
    if (op_desc->GetType() == ATOMICADDRCLEAN) {
      GELOGD("Begin to compile single op, op name is %s.", op_desc->GetName().c_str());
      string kernel_lib_name = op_desc->GetOpKernelLibName();
      if (kernel_lib_name.empty()) {
        // Reset op kernel lib
        (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
        kernel_lib_name = op_desc->GetOpKernelLibName();
        if (kernel_lib_name.empty()) {
          GELOGE(ge::INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node->GetName().c_str(),
                 node->GetType().c_str());
          return ge::INTERNAL_ERROR;
        }
      }
      OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
      if (kernel_info != nullptr) {
        node_vector_map[kernel_lib_name].emplace_back(node);
      } else {
        GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
        return ge::GE_GRAPH_PARAM_NULLPTR;
      }
    }
  }
  for (auto &it : node_vector_map) {
    auto &kernel_lib_name = it.first;
    auto &node_vector = it.second;
    OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
    GE_CHECK_NOTNULL(kernel_info);
    GE_TIMESTAMP_RESTART(BatchCompileOp);
    auto ret = kernel_info->CompileOp(node_vector);
    GELOGI("[GEPERFTRACE] The node size of compile op of %s is %zu", kernel_lib_name.c_str(), node_vector.size());
    GE_TIMESTAMP_ADD(BatchCompileOp);
    if (ret != ge::SUCCESS) {
      GELOGE(ret, "Compile op failed, kernel lib name is %s", kernel_lib_name.c_str());
      return ret;
    }
  }
  GE_TIMESTAMP_CALLNUM_END(BatchCompileOp, "GraphBuild::CompileOp");
  return ge::SUCCESS;
}

void ModelBuilder::CollectCheckAicpuAttr(const OpDescPtr &op_desc, std::set<std::string> &aicpu_op_types,
                                         std::set<std::string> &aicpu_tf_op_types) {
  std::string aicpu_optype;
  bool has_attr_check_cpu = ge::AttrUtils::GetStr(op_desc, "needCheckCpu", aicpu_optype);
  std::vector<std::string> tf_optypes;
  bool has_attr_check_tf = ge::AttrUtils::GetListStr(op_desc, "needCheckTf", tf_optypes);
  if (has_attr_check_cpu && !aicpu_optype.empty()) {
    aicpu_op_types.insert(aicpu_optype);
  }
  if (has_attr_check_tf && !tf_optypes.empty()) {
    aicpu_tf_op_types.insert(tf_optypes.begin(), tf_optypes.end());
  }
  return;
}

void ModelBuilder::SetModelCheckAicpuAttr(ge::Model &model, std::set<std::string> &aicpu_op_types,
                                          std::set<std::string> &aicpu_tf_op_types) {
  std::vector<std::string> aicpu_optype_list;
  std::vector<std::string> aicpu_tf_optype_list;
  if (ge::AttrUtils::GetListStr(&model, "needCheckCpu", aicpu_optype_list)) {
    GELOGI("Already have aicpu optype size: %zu", aicpu_optype_list.size());
    aicpu_op_types.insert(aicpu_optype_list.begin(), aicpu_optype_list.end());
  }
  if (ge::AttrUtils::GetListStr(&model, "needCheckTf", aicpu_tf_optype_list)) {
    GELOGI("Already have aicpu tf optype size: %zu", aicpu_tf_optype_list.size());
    aicpu_tf_op_types.insert(aicpu_tf_optype_list.begin(), aicpu_tf_optype_list.end());
  }
  // reset list with set
  aicpu_optype_list.assign(aicpu_op_types.begin(), aicpu_op_types.end());
  aicpu_tf_optype_list.assign(aicpu_tf_op_types.begin(), aicpu_tf_op_types.end());
  GELOGI(
      "Check Aicpu op types ComputeGraph: %s aicpu_op_types: %zu, aicpu_optype_list: %zu, aicpu_tf_op_types: %zu, "
      "aicpu_tf_optype_list:%zu.",
      compute_graph_->GetName().c_str(), aicpu_op_types.size(), aicpu_optype_list.size(), aicpu_tf_op_types.size(),
      aicpu_tf_optype_list.size());
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckCpu", aicpu_optype_list), return,
                   "Set attr needCheckCpu fail.");
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckTf", aicpu_tf_optype_list), return,
                   "Set attr needCheckTf fail.");
  return;
}
}  // namespace ge
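
For orientation, the sketch below shows how a caller might drive ModelBuilder through the build steps defined in this file (PreBuildModel, BuildModelForGetTask, SaveDataToModel, GetWeightBuffer). It is a minimal illustration only, not the actual graph-build pipeline: the session id, compute graph, subgraph list, and option values are assumed to come from the surrounding builder code, and the task-generation step that fills MODEL_ATTR_TASKS on the model is omitted.

// Minimal sketch (not part of model_builder.cc): driving ModelBuilder end to end.
// session_id, compute_graph and subgraphs are assumed to be supplied by the caller;
// the option values below are placeholders.
ge::Status BuildGeModelSketch(uint64_t session_id, ge::ComputeGraphPtr compute_graph,
                              const ge::Graph2SubGraphInfoList &subgraphs, ge::GeModel &ge_model) {
  std::map<std::string, int> stream_max_parallel_num;  // empty: use default parallelism
  ge::ModelBuilder builder(session_id, compute_graph, subgraphs, stream_max_parallel_num,
                           false /* hcom_parallel */, 0 /* build mode, placeholder */);

  // 1. Fix up input/output descriptions and node input properties.
  if (builder.PreBuildModel() != ge::SUCCESS) {
    return ge::FAILED;
  }

  // 2. Assign streams and memory, merge weights, and fill the model definition.
  ge::Model model;
  if (builder.BuildModelForGetTask(model) != ge::SUCCESS) {
    return ge::FAILED;
  }

  // (In the real pipeline, task generation sets MODEL_ATTR_TASKS on `model` here.)

  // 3. Copy weights, kernel binaries and task definitions into the serializable GeModel.
  if (builder.SaveDataToModel(model, ge_model) != ge::SUCCESS) {
    return ge::FAILED;
  }
  return ge::SUCCESS;
}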

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.
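
To make the GE API / GE Core split concrete, the following sketch shows how a client could drive GE directly through the GE API layer (GEInitialize, Session, AddGraph, RunGraph, GEFinalize); in normal MindSpore training or inference this happens automatically, as noted above. This is a hedged illustration under assumptions: the option maps are left empty (defaults), the header path is taken from the GraphEngine external headers, and BuildSampleGraph() is a hypothetical helper standing in for real operator-level graph construction.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // GE API layer (assumed header path)

// Hypothetical helper: assembles a ge::Graph from operators (details omitted).
ge::Graph BuildSampleGraph();

int main() {
  std::map<std::string, std::string> ge_options;  // global options, defaults only
  if (ge::GEInitialize(ge_options) != ge::SUCCESS) {  // bring up GE
    return -1;
  }
  {
    std::map<std::string, std::string> session_options;  // per-session options
    ge::Session session(session_options);

    ge::Graph graph = BuildSampleGraph();
    const uint32_t graph_id = 0;
    if (session.AddGraph(graph_id, graph) != ge::SUCCESS) {  // hand the graph to GE Core
      ge::GEFinalize();
      return -1;
    }

    std::vector<ge::Tensor> inputs;  // would hold real input tensors
    std::vector<ge::Tensor> outputs;
    // GE Core optimizes, builds and executes the graph on the Ascend device.
    if (session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS) {
      ge::GEFinalize();
      return -1;
    }
  }  // Session is destroyed before GEFinalize.
  ge::GEFinalize();
  return 0;
}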