You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

single_op_parser.cc 20 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include "single_op_parser.h"

#include <algorithm>
#include <cctype>
#include <fstream>
#include <sstream>
#include <utility>
#include <vector>

#include <nlohmann/json.hpp>

#include "framework/common/debug/ge_log.h"
#include "common/util/error_manager/error_manager.h"
#include "common/ge_inner_error_codes.h"
#include "framework/common/util.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/operator_factory_impl.h"
  29. using Json = nlohmann::json;
  30. using std::string;
  31. using std::vector;
  32. using std::map;
  33. namespace ge {
  34. namespace {
  35. constexpr char const *kKeyOp = "op";
  36. constexpr char const *kKeyInputDesc = "input_desc";
  37. constexpr char const *kKeyOutputDesc = "output_desc";
  38. constexpr char const *kKeyAttr = "attr";
  39. constexpr char const *kKeyName = "name";
  40. constexpr char const *kKeyType = "type";
  41. constexpr char const *kKeyShape = "shape";
  42. constexpr char const *kKeyShapeRange = "shape_range";
  43. constexpr char const *kKeyValue = "value";
  44. constexpr char const *kKeyFormat = "format";
  45. constexpr char const *kFileSuffix = ".om";
  46. constexpr char const *kKeyDynamicInput = "dynamic_input";
  47. constexpr char const *kKeyDynamicOutput = "dynamic_output";
  48. constexpr int kDumpJsonIndent = 2;
  49. constexpr int kShapeRangePairSize = 2;
  50. constexpr int kShapeRangeLow = 0;
  51. constexpr int kShapeRangeHigh = 1;
  52. map<string, GeAttrValue::ValueType> kAttrTypeDict = {
  53. {"bool", GeAttrValue::VT_BOOL},
  54. {"int", GeAttrValue::VT_INT},
  55. {"float", GeAttrValue::VT_FLOAT},
  56. {"string", GeAttrValue::VT_STRING},
  57. {"list_bool", GeAttrValue::VT_LIST_BOOL},
  58. {"list_int", GeAttrValue::VT_LIST_INT},
  59. {"list_float", GeAttrValue::VT_LIST_FLOAT},
  60. {"list_string", GeAttrValue::VT_LIST_STRING},
  61. {"list_list_int", GeAttrValue::VT_LIST_LIST_INT},
  62. {"data_type", GeAttrValue::VT_DATA_TYPE},
  63. };
  64. map<string, DataType> kDataTypeDict = {
  65. {"bool", DT_BOOL},
  66. {"int8", DT_INT8},
  67. {"uint8", DT_UINT8},
  68. {"int16", DT_INT16},
  69. {"uint16", DT_UINT16},
  70. {"int32", DT_INT32},
  71. {"uint32", DT_UINT32},
  72. {"int64", DT_INT64},
  73. {"uint64", DT_UINT64},
  74. {"float16", DT_FLOAT16},
  75. {"half", DT_FLOAT16},
  76. {"fp16", DT_FLOAT16},
  77. {"float", DT_FLOAT},
  78. {"float32", DT_FLOAT},
  79. {"double", DT_DOUBLE},
  80. };
  81. map<string, Format> kFormatDict = {
  82. {"nchw", FORMAT_NCHW},
  83. {"nhwc", FORMAT_NHWC},
  84. {"nd", FORMAT_ND},
  85. {"fractal_nz", FORMAT_FRACTAL_NZ},
  86. {"fractal_z", FORMAT_FRACTAL_Z},
  87. {"nc1hwc0", FORMAT_NC1HWC0},
  88. };
  89. }
  90. template<typename T>
  91. void SetAttrValue(const Json &j, SingleOpAttr &attr) {
  92. attr.value.SetValue<T>(j.at(kKeyValue).get<T>());
  93. }
  94. template<typename T>
  95. T GetValue(const map<string, T> &dict, string &key, T default_val) {
  96. transform(key.begin(), key.end(), key.begin(), ::tolower);
  97. auto it = dict.find(key);
  98. if (it == dict.end()) {
  99. return default_val;
  100. }
  101. return it->second;
  102. }
  103. void from_json(const Json &j, SingleOpTensorDesc &desc) {
  104. desc.dims = j.at(kKeyShape).get<vector<int64_t>>();
  105. auto it = j.find(kKeyShapeRange);
  106. if (it != j.end()) {
  107. desc.dim_ranges = j.at(kKeyShapeRange).get<vector<std::vector<int64_t>>>();
  108. }
  109. string format_str = j.at(kKeyFormat).get<string>();
  110. string type_str = j.at(kKeyType).get<string>();
  111. desc.format = GetValue(kFormatDict, format_str, FORMAT_RESERVED);
  112. desc.type = GetValue(kDataTypeDict, type_str, DT_UNDEFINED);
  113. auto tensor_name = j.find(kKeyName);
  114. if (tensor_name != j.end()) {
  115. desc.name = tensor_name->get<string>();
  116. }
  117. auto dynamic_input_name = j.find(kKeyDynamicInput);
  118. if (dynamic_input_name != j.end()) {
  119. desc.dynamic_input_name = dynamic_input_name->get<string>();
  120. }
  121. }
// Deserialize one "attr" JSON entry: reads "name" and "type", then parses
// the "value" field with the C++ type selected by the "type" string.
// On an unsupported type the attr's value is left empty; Validate() later
// rejects empty attr values, so the failure is not silently dropped.
void from_json(const Json &j, SingleOpAttr &attr) {
  attr.name = j.at(kKeyName).get<string>();
  attr.type = j.at(kKeyType).get<string>();
  auto it = kAttrTypeDict.find(attr.type);
  if (it == kAttrTypeDict.end()) {
    GELOGE(UNSUPPORTED, "Parse attr[%s] failed. Unsupported type: %s", attr.name.c_str(), attr.type.c_str());
    return;
  }
  // Dispatch on the declared attribute type; each case reads the JSON
  // "value" as the matching C++ type via SetAttrValue<T>.
  switch (it->second) {
    case GeAttrValue::VT_BOOL:
      SetAttrValue<bool>(j, attr);
      break;
    case GeAttrValue::VT_INT:
      SetAttrValue<int64_t>(j, attr);
      break;
    case GeAttrValue::VT_FLOAT:
      SetAttrValue<float>(j, attr);
      break;
    case GeAttrValue::VT_STRING:
      SetAttrValue<string>(j, attr);
      break;
    case GeAttrValue::VT_LIST_BOOL:
      SetAttrValue<vector<bool>>(j, attr);
      break;
    case GeAttrValue::VT_LIST_INT:
      SetAttrValue<vector<int64_t>>(j, attr);
      break;
    case GeAttrValue::VT_LIST_FLOAT:
      SetAttrValue<vector<float>>(j, attr);
      break;
    case GeAttrValue::VT_LIST_STRING:
      SetAttrValue<vector<string>>(j, attr);
      break;
    case GeAttrValue::VT_LIST_LIST_INT:
      SetAttrValue<vector<vector<int64_t>>>(j, attr);
      break;
    case GeAttrValue::VT_DATA_TYPE:
      SetAttrValue<DataType>(j, attr);
      break;
    default:
      // Unreachable while kAttrTypeDict and this switch stay in sync; kept
      // as a safety net if a new ValueType is added to the dict only.
      GELOGE(UNSUPPORTED, "Parse attr[%s] failed. Unsupported type: %s", attr.name.c_str(), attr.type.c_str());
      break;
  }
}
  166. void from_json(const Json &j, SingleOpDesc &desc) {
  167. desc.op = j.at(kKeyOp).get<string>();
  168. auto input_desc = j.find(kKeyInputDesc);
  169. if (input_desc != j.end()) {
  170. desc.input_desc = input_desc->get<vector<SingleOpTensorDesc>>();
  171. }
  172. auto output_desc = j.find(kKeyOutputDesc);
  173. if (output_desc != j.end()) {
  174. desc.output_desc = output_desc->get<vector<SingleOpTensorDesc>>();
  175. }
  176. auto attr_field = j.find(kKeyAttr);
  177. if (attr_field != j.end()) {
  178. desc.attrs = attr_field->get<vector<SingleOpAttr>>();
  179. }
  180. }
// Read and parse the JSON file supplied via --singleop.
// @param file      path given on the command line
// @param json_obj  output: the parsed JSON document
// @return SUCCESS, or INTERNAL_ERROR / FAILED / PARAM_INVALID for
//         path-resolution, open, and parse failures respectively.
Status SingleOpParser::ReadJsonFile(const std::string &file, Json &json_obj) {
  // RealPath resolves the path and returns "" for invalid/non-existent paths.
  std::string real_path = RealPath(file.c_str());
  if (real_path.empty()) {
    ErrorManager::GetInstance().ATCReportErrMessage("E10023", {"value"}, {file});
    GELOGE(FAILED, "Input parameter[--singleop]'s value[%s] is not a valid path.", file.c_str());
    return INTERNAL_ERROR;
  }
  std::ifstream ifs(real_path);
  if (!ifs.is_open()) {
    ErrorManager::GetInstance().ATCReportErrMessage("E10024", {"value"}, {file});
    GELOGE(FAILED, "Open file[%s] provided in input parameter[--singleop] failed.", file.c_str());
    return FAILED;
  }
  try {
    // nlohmann::json's stream extraction throws on malformed input.
    ifs >> json_obj;
  } catch (const std::exception &e) {
    ErrorManager::GetInstance().ATCReportErrMessage("E10025", {"realpath", "errmsg"}, {real_path, e.what()});
    GELOGE(PARAM_INVALID, "Parse file[%s] provided in input parameter[--singleop] failed, exception = %s.",
           real_path.c_str(), e.what());
    return PARAM_INVALID;
  }
  ifs.close();
  return SUCCESS;
}
  205. bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {
  206. if (op_desc.op.empty()) {
  207. ErrorManager::GetInstance().ATCReportErrMessage("E10026");
  208. GELOGE(PARAM_INVALID, "Op name is empty");
  209. return false;
  210. }
  211. int index = 0;
  212. for (auto &tensor_desc : op_desc.input_desc) {
  213. if ((tensor_desc.type == DT_UNDEFINED && tensor_desc.format != FORMAT_RESERVED) ||
  214. (tensor_desc.type != DT_UNDEFINED && tensor_desc.format == FORMAT_RESERVED)){
  215. ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
  216. {"intput", "datatype or format", std::to_string(index)});
  217. GELOGE(PARAM_INVALID, "Input's dataType or format is invalid when the index is %d", index);
  218. return false;
  219. }
  220. ++index;
  221. }
  222. index = 0;
  223. for (auto &tensor_desc : op_desc.output_desc) {
  224. if (tensor_desc.type == DT_UNDEFINED) {
  225. ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
  226. {"output", "datatype", std::to_string(index)});
  227. GELOGE(PARAM_INVALID, "Output's dataType is invalid when the index is %d", index);
  228. return false;
  229. }
  230. if (tensor_desc.format == FORMAT_RESERVED) {
  231. ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
  232. {"output", "format", std::to_string(index)});
  233. GELOGE(PARAM_INVALID, "Output's format is invalid when the index is %d", index);
  234. return false;
  235. }
  236. ++index;
  237. }
  238. for (auto &attr : op_desc.attrs) {
  239. if (attr.name.empty()) {
  240. ErrorManager::GetInstance().ATCReportErrMessage("E10029");
  241. GELOGE(PARAM_INVALID, "attr name is empty");
  242. return false;
  243. }
  244. if (attr.value.IsEmpty()) {
  245. ErrorManager::GetInstance().ATCReportErrMessage("E10030", {"attrname"}, {attr.name});
  246. GELOGE(PARAM_INVALID, "Parse attr \"%s\" failed. ", attr.name.c_str());
  247. return false;
  248. }
  249. }
  250. return true;
  251. }
  252. std::unique_ptr<OpDesc> SingleOpParser::CreateOpDesc(const string &op_type) {
  253. return std::unique_ptr<OpDesc>(new(std::nothrow) OpDesc(op_type, op_type));
  254. }
  255. Status SingleOpParser::UpdateDynamicTensorName(std::vector<SingleOpDesc> &desc) {
  256. std::map<std::string, int> dynamic_name_map;
  257. for (auto &tensor : desc) {
  258. if (tensor.dynamic_input_name.empty()) {
  259. continue;
  260. }
  261. if (dynamic_name_map.find(tensor.dynamic_input_name) == dynamic_name_map.end()) {
  262. dynamic_name_map[tensor.dynamic_input_name] = 0;
  263. } else {
  264. dynamic_name_map[tensor.dynamic_input_name]++;
  265. }
  266. tensor_name = tensor.dynamic_input_name + std::to_string(dynamic_name_map[tensor.dynamic_input_name]);
  267. }
  268. GELOGD("Update dynamic tensor name success!");
  269. }
// Build a SingleOpBuildParam (OpDesc + input/output GeTensorDescs + target
// file name) from a validated SingleOpDesc.
// The generated file name encodes the op index, op type and every tensor's
// type/format/dims, and ends with the ".om" suffix.
// @return SUCCESS, or MEMALLOC_FAILED / PARAM_INVALID (also propagates
//         SetShapeRange failures via GE_CHK_STATUS_RET_NOLOG).
Status SingleOpParser::ConvertToBuildParam(int index,
                                           const SingleOpDesc &single_op_desc,
                                           SingleOpBuildParam &build_param) {
  auto op_desc = CreateOpDesc(single_op_desc.op);
  if (op_desc == nullptr) {
    GELOGE(MEMALLOC_FAILED, "Failed to create instance of opDesc");
    return MEMALLOC_FAILED;
  }
  std::stringstream file_name;
  file_name << index;
  file_name << "_" << single_op_desc.op;
  // Register inputs: mirror each descriptor into the OpDesc and collect it
  // into build_param.inputs, while extending the file name.
  for (auto &desc : single_op_desc.input_desc) {
    file_name << "_" << desc.type << "_" << desc.format;
    for (auto dim : desc.dims) {
      file_name << "_" << dim;
    }
    GeTensorDesc ge_tensor_desc(GeShape(desc.dims),
                                desc.format,
                                desc.type);
    // Origin format mirrors the declared format; no reformat is implied.
    ge_tensor_desc.SetOriginFormat(desc.format);
    GE_CHK_STATUS_RET_NOLOG(SetShapeRange(op_desc->GetName(), desc, ge_tensor_desc));
    TensorUtils::SetRealDimCnt(ge_tensor_desc, desc.dims.size());
    TensorUtils::SetInputTensor(ge_tensor_desc, true);
    TensorUtils::SetOutputTensor(ge_tensor_desc, false);
    if (desc.name.empty()) {
      op_desc->AddInputDesc(ge_tensor_desc);
    } else {
      // Named inputs (e.g. renamed dynamic inputs) keep their name.
      op_desc->AddInputDesc(desc.name, ge_tensor_desc);
    }
    build_param.inputs.emplace_back(ge_tensor_desc);
  }
  // Register outputs: same treatment with the input/output flags swapped.
  for (auto &desc : single_op_desc.output_desc) {
    file_name << "_" << desc.type << "_" << desc.format;
    for (auto dim : desc.dims) {
      file_name << "_" << dim;
    }
    GeTensorDesc ge_tensor_desc(GeShape(desc.dims),
                                desc.format,
                                desc.type);
    ge_tensor_desc.SetOriginFormat(desc.format);
    GE_CHK_STATUS_RET_NOLOG(SetShapeRange(op_desc->GetName(), desc, ge_tensor_desc));
    TensorUtils::SetRealDimCnt(ge_tensor_desc, desc.dims.size());
    TensorUtils::SetInputTensor(ge_tensor_desc, false);
    TensorUtils::SetOutputTensor(ge_tensor_desc, true);
    if (desc.name.empty()) {
      op_desc->AddOutputDesc(ge_tensor_desc);
    } else {
      op_desc->AddOutputDesc(desc.name, ge_tensor_desc);
    }
    build_param.outputs.emplace_back(ge_tensor_desc);
  }
  for (const auto &attr : single_op_desc.attrs) {
    op_desc->SetAttr(attr.name, attr.value);
  }
  // Compare against the registered IR definition (if any) before accepting.
  if (VerifyOpInputOutputSizeByIr(*op_desc) != SUCCESS) {
    GELOGE(PARAM_INVALID, "Verify op [%s] input or output size failed.", op_desc->GetType().c_str());
    return PARAM_INVALID;
  }
  file_name << kFileSuffix;
  build_param.file_name = file_name.str();
  // Hand ownership of the OpDesc over to build_param.
  build_param.op_desc.reset(op_desc.release());
  return SUCCESS;
}
// Check the user-supplied op against its IR definition: when the op type is
// registered in the OperatorFactory, the op must provide at least as many
// inputs/outputs as the IR requires. Unregistered op types (IsEmpty()) are
// accepted without checks.
// @return SUCCESS, or PARAM_INVALID when the op has fewer inputs/outputs
//         than the IR definition demands.
Status SingleOpParser::VerifyOpInputOutputSizeByIr(const OpDesc &current_op_desc) {
  ge::Operator operator_ir = ge::OperatorFactory::CreateOperator("tmp_operator", current_op_desc.GetType());
  if (!operator_ir.IsEmpty()) {
    auto opdesc_ir = ge::OpDescUtils::GetOpDescFromOperator(operator_ir);
    GE_CHECK_NOTNULL(opdesc_ir);
    size_t current_opdesc_inputs_num = current_op_desc.GetInputsSize();
    size_t ir_opdesc_inputs_num = opdesc_ir->GetInputsSize();
    if (current_opdesc_inputs_num < ir_opdesc_inputs_num) {
      string reason = "is smaller than the ir needed input size " + std::to_string(ir_opdesc_inputs_num);
      ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
          {current_op_desc.GetName(), "input size " + std::to_string(current_opdesc_inputs_num), reason});
      GELOGE(PARAM_INVALID, "This op [%s] input size %zu is smaller than the ir needed input size %zu",
             current_op_desc.GetName().c_str(), current_opdesc_inputs_num, ir_opdesc_inputs_num);
      return PARAM_INVALID;
    }
    size_t current_opdesc_outputs_num = current_op_desc.GetOutputsSize();
    size_t ir_opdesc_outputs_num = opdesc_ir->GetOutputsSize();
    if (current_opdesc_outputs_num < ir_opdesc_outputs_num) {
      string reason = "is smaller than the ir needed output size " + std::to_string(ir_opdesc_outputs_num);
      ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
          {current_op_desc.GetName(), "output size " + std::to_string(current_opdesc_outputs_num), reason});
      GELOGE(PARAM_INVALID, "This op [%s] output size %zu is smaller than the ir needed output size %zu",
             current_op_desc.GetName().c_str(), current_opdesc_outputs_num, ir_opdesc_outputs_num);
      return PARAM_INVALID;
    }
  }
  return SUCCESS;
}
// Convert the tensor's "shape_range" entries into GeTensorDesc shape ranges.
// Known dims (>= 0) get the degenerate range [dim, dim]; each unknown dim
// consumes the next [low, high] pair from tensor_desc.dim_ranges, in order.
// A fully-unknown-rank shape (dims == UNKNOWN_RANK) must carry no range.
// @param op_name        op name, used only for error reporting
// @param tensor_desc    parsed tensor descriptor (dims + dim_ranges)
// @param ge_tensor_desc output: receives the shape range when any dim is unknown
// @return SUCCESS, or PARAM_INVALID on any shape/range inconsistency
Status SingleOpParser::SetShapeRange(const std::string &op_name,
                                     const SingleOpTensorDesc &tensor_desc,
                                     GeTensorDesc &ge_tensor_desc) {
  auto num_shape_ranges = tensor_desc.dim_ranges.size();
  GELOGD("Number of shape ranges = %zu", num_shape_ranges);
  auto it = std::find(tensor_desc.dims.begin(), tensor_desc.dims.end(), ge::UNKNOWN_DIM_NUM);
  if (it != tensor_desc.dims.end()) {
    // UNKNOWN_DIM_NUM appears in the shape: then the whole shape must be
    // exactly UNKNOWN_RANK — a mixed shape is invalid.
    if (tensor_desc.dims != ge::UNKNOWN_RANK) {
      ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
                                                      {op_name,
                                                       "shape",
                                                       "has unknown rank but dim size is not one"});
      GELOGE(PARAM_INVALID, "Invalid tensor shape: [%s]", ge_tensor_desc.MutableShape().ToString().c_str());
      return PARAM_INVALID;
    }
    // Unknown rank admits no per-dim ranges.
    if (!tensor_desc.dim_ranges.empty()) {
      ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
                                                      {op_name,
                                                       "shape range",
                                                       "is not needed while the rank the shape is unknown"});
      GELOGE(PARAM_INVALID, "Shape range is not needed while the rank the shape is unknown");
      return PARAM_INVALID;
    }
    GELOGD("Shape is unknown rank, do not set shape range");
    return SUCCESS;
  }
  std::vector<std::pair<int64_t, int64_t>> shape_range;
  size_t range_index = 0;  // number of dim_ranges entries consumed so far
  for (auto dim : tensor_desc.dims) {
    if (dim >= 0) {
      // Known dim: range collapses to the dim itself.
      shape_range.emplace_back(dim, dim);
      GELOGD("Adding shape range: [%ld, %ld]", dim, dim);
    } else {
      GELOGD("To get shape range by index = %zu", range_index);
      if (range_index >= num_shape_ranges) {
        // ++range_index converts the 0-based index into the 1-based count of
        // unknown dims encountered, for the error message only.
        string reason = "is smaller than the unknown dim size " + std::to_string(++range_index);
        ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
                                                        {op_name,
                                                         "shape range size " + std::to_string(num_shape_ranges),
                                                         reason});
        GELOGE(PARAM_INVALID, "The number of shape_range mismatches that of unknown dims.");
        return PARAM_INVALID;
      }
      auto &range = tensor_desc.dim_ranges[range_index];
      // Each range entry must be exactly a [low, high] pair.
      if (range.size() != kShapeRangePairSize) {
        string reason = "has " + std::to_string(range.size()) + " item(s)";
        ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
                                                        {op_name,
                                                         "shape range " + std::to_string(range_index),
                                                         reason});
        GELOGE(PARAM_INVALID, "Invalid shape range entry. index = %zu, size = %zu", range_index, range.size());
        return PARAM_INVALID;
      }
      shape_range.emplace_back(range[kShapeRangeLow], range[kShapeRangeHigh]);
      GELOGD("Adding shape range: [%ld, %ld]", range[kShapeRangeLow], range[kShapeRangeHigh]);
      ++range_index;
    }
  }
  // Leftover entries mean more ranges were supplied than unknown dims exist.
  if (num_shape_ranges != range_index) {
    string reason = "is greater than the unknown dim size " + std::to_string(range_index);
    ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
                                                    {op_name,
                                                     "shape range size " + std::to_string(num_shape_ranges),
                                                     reason});
    GELOGE(PARAM_INVALID,
           "The number of shape_range(%zu) mismatches that of unknown dims(%zu).",
           num_shape_ranges,
           range_index);
    return PARAM_INVALID;
  }
  // Only attach a range when at least one dim was unknown.
  if (range_index > 0) {
    ge_tensor_desc.SetShapeRange(shape_range);
  }
  return SUCCESS;
}
  436. Status SingleOpParser::ParseSingleOpList(const std::string &file, std::vector<SingleOpBuildParam> &op_list) {
  437. int index = 0;
  438. try {
  439. Json single_op_list_json;
  440. auto ret = ReadJsonFile(file, single_op_list_json);
  441. if (ret != SUCCESS) {
  442. return ret;
  443. }
  444. for (const Json &single_op_json : single_op_list_json) {
  445. SingleOpDesc single_op_desc;
  446. GELOGI("Parsing op[%d], jsonStr = %s", index, single_op_json.dump(kDumpJsonIndent).c_str());
  447. single_op_desc = single_op_json;
  448. if (UpdateDynamicTensorName(single_op_desc.input_desc) != SUCCESS) {
  449. GELOGE(FAILED, "Update dynamic tensor name failed!");
  450. return FAILED;
  451. }
  452. if (!Validate(single_op_desc)) {
  453. GELOGE(PARAM_INVALID, "Validate the index[%d] of op failed when read json file[%s].", index, file.c_str());
  454. return PARAM_INVALID;
  455. }
  456. SingleOpBuildParam param;
  457. ret = ConvertToBuildParam(index, single_op_desc, param);
  458. if (ret != SUCCESS) {
  459. return ret;
  460. }
  461. op_list.emplace_back(param);
  462. GELOGI("Parse the index[%d] of op success", index);
  463. index += 1;
  464. }
  465. } catch (const nlohmann::json::exception &e) {
  466. ErrorManager::GetInstance().ATCReportErrMessage("E10032", {"index", "jsonfile", "exception"},
  467. {std::to_string(index), file, e.what()});
  468. GELOGE(PARAM_INVALID, "Parse the index[%d] of op failed when read json file[%s], exception %s",
  469. index, file.c_str(), e.what());
  470. return PARAM_INVALID;
  471. }
  472. return SUCCESS;
  473. }
  474. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示