
multi_batch_options.cc 25 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "multi_batch_options.h"

#include "framework/common/debug/ge_log.h"
#include "framework/omg/omg_inner_types.h"
#include "framework/common/util.h"
#include "framework/common/string_util.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "common/util/error_manager/error_manager.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/node_utils.h"
#include "graph/ge_context.h"
#include "graph/common/local_context.h"
#include "framework/common/types.h"
#include "graph/compute_graph.h"
#include "graph/utils/graph_utils.h"
#include "graph/common/omg_util.h"

namespace ge {
namespace multibatch {
constexpr int kDecimal = 10;
constexpr uint8_t kMaxShapesCount = 100;
constexpr uint8_t kMinShapesCount = 2;
const int kDynmaicDims = -1;
const int kDynamicBatchDynamicDimsNum = 1;
const int kDynamicImgSizeDynamciDimsNum = 2;
const size_t kMaxNDDimNum = 4;
const size_t kMinNDDimNum = 1;
const size_t kNumOfGetnextNode = 1;
const int kDivisionConst = 2;
const char *const kSubstrOfGetNextNosinkName = "IteratorGetNext";
const char *const kShapeDataName = "ascend_mbatch_shape_data";
const char *const kGetNextName = "IteratorV2";

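///
/// @ingroup ge
/// @brief Check whether a node is a getnext-sink node, i.e. its original type is IteratorV2.
/// @param [in] const NodePtr &node: node to check.
/// @return true: node is a getnext node / false: otherwise.
///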
inline bool IsGetNextType(const NodePtr &node) {
  std::string original_type;
  GE_IF_BOOL_EXEC(GetOriginalType(node, original_type) != SUCCESS,
                  GELOGW("Get original type failed."); return false);
  return (original_type == kGetNextName);
}

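///
/// @ingroup ge
/// @brief Parse a dynamic size option string into gears of shapes.
/// @param [in] string dynamic_size: option value, gears separated by ';', dims separated by ','.
/// @param [out] vector<vector<int64_t>> &shapes: parsed gears.
///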
void ParseDynamicSize(string dynamic_size, vector<vector<int64_t>> &shapes) {
  std::vector<std::string> shape_strs = ge::StringUtils::Split(dynamic_size, ';');
  for (const auto &shape_str : shape_strs) {
    if (shape_str.empty()) {
      continue;
    }
    std::vector<int64_t> shape;
    std::vector<std::string> dims = ge::StringUtils::Split(shape_str, ',');
    for (const auto &dim : dims) {
      if (dim.empty()) {
        continue;
      }
      shape.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
    }
    if (!shape.empty()) {
      shapes.emplace_back(shape);
    }
  }
}

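///
/// @ingroup ge
/// @brief Classify the graph's input nodes into data nodes, getnext-nosink nodes and getnext-sink nodes.
/// @param [in] ComputeGraphPtr &graph: graph to scan.
/// @param [out] data_nodes / getnext_nosink_nodes / getnext_sink_nodes: classified input nodes.
/// @return SUCCESS / PARAM_INVALID
///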
Status DistinguishGetNextAndData(ComputeGraphPtr &graph, vector<NodePtr> &data_nodes,
                                 vector<NodePtr> &getnext_nosink_nodes, vector<NodePtr> &getnext_sink_nodes) {
  GELOGD("Start distinguish getnext and data node.");
  for (NodePtr &input_node : graph->GetDirectNode()) {
    GE_CHECK_NOTNULL(input_node);
    OpDescPtr op_desc = input_node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    if (op_desc->GetType() == DATA && op_desc->GetName() != kShapeDataName) {
      if (op_desc->GetName().find(kSubstrOfGetNextNosinkName) == string::npos) {
        data_nodes.emplace_back(input_node);
      } else {
        getnext_nosink_nodes.emplace_back(input_node);
      }
    }
    if (IsGetNextType(input_node)) {
      GELOGD("Name of getnext sink is %s.", op_desc->GetName().c_str());
      getnext_sink_nodes.emplace_back(input_node);
    }
  }
  GELOGI("Data count is %zu, getnext nosink count is %zu, getnext sink count is %zu.", data_nodes.size(),
         getnext_nosink_nodes.size(), getnext_sink_nodes.size());
  return SUCCESS;
}

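///
/// @ingroup ge
/// @brief Check that the order and dims of the user input shapes match the data nodes' output shapes.
/// @param [in] const vector<NodePtr> &data_nodes: data nodes to check against user_input_dims.
/// @return SUCCESS / PARAM_INVALID / INTERNAL_ERROR
///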
Status CheckSequenceOfData(ComputeGraphPtr &graph, const vector<NodePtr> &data_nodes) {
  GELOGD("Start check input sequence from data nodes and input shape.");
  if (data_nodes.size() != GetLocalOmgContext().user_input_dims.size()) {
    GELOGE(PARAM_INVALID, "The count of input shape:%zu should be equal to the count of data num:%zu.",
           GetLocalOmgContext().user_input_dims.size(), data_nodes.size());
    return PARAM_INVALID;
  }
  for (size_t i = 0; i < data_nodes.size(); ++i) {
    auto data_node = data_nodes.at(i);
    GE_CHECK_NOTNULL(data_node);
    GE_CHECK_NOTNULL(data_node->GetOpDesc());
    auto output_shape = data_node->GetOpDesc()->GetOutputDesc(0).GetShape().GetDims();
    auto dynamic_dims = GetLocalOmgContext().user_input_dims.at(i).second;
    if (output_shape.empty() && dynamic_dims.size() == 1 && dynamic_dims.at(0) == 0) {
      GELOGI("No need to check sequence for constant.");
      continue;
    }
    if (dynamic_dims.size() != output_shape.size()) {
      GELOGE(PARAM_INVALID, "The output shape of %s is %s, the input shape from options of %s is %s.",
             data_node->GetName().c_str(), formats::JoinToString(output_shape).c_str(),
             GetLocalOmgContext().user_input_dims.at(i).first.c_str(), formats::JoinToString(dynamic_dims).c_str());
      return PARAM_INVALID;
    }
    for (size_t j = 0; j < dynamic_dims.size(); ++j) {
      if (dynamic_dims.at(j) != kDynmaicDims && dynamic_dims.at(j) != output_shape.at(j)) {
        GELOGE(INTERNAL_ERROR, "Value of input shape %s should be equal to %s.",
               formats::JoinToString(dynamic_dims).c_str(), formats::JoinToString(output_shape).c_str());
        return INTERNAL_ERROR;
      }
    }
  }
  return SUCCESS;
}

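///
/// @ingroup ge
/// @brief Check that the user input shapes match the output shapes of the single getnext-sink node.
/// @param [in] const vector<NodePtr> &getnext_sink_node: getnext sink nodes, only one is supported.
/// @return SUCCESS / PARAM_INVALID / INTERNAL_ERROR
///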
Status CheckSequenceOfGetnext(ComputeGraphPtr &graph, const vector<NodePtr> &getnext_sink_node) {
  GELOGD("Start check input sequence from getnext sink nodes and input shape.");
  if (getnext_sink_node.size() != kNumOfGetnextNode) {
    GELOGE(PARAM_INVALID, "Not support dynamic dims when a graph with multi getnext nodes.");
    return PARAM_INVALID;
  }
  auto data_node = getnext_sink_node.at(0);
  GE_CHECK_NOTNULL(data_node);
  auto op_desc = data_node->GetOpDesc();
  GE_CHECK_NOTNULL(op_desc);
  size_t data_count = data_node->GetAllOutDataAnchors().size() / kDivisionConst;
  if (data_count != GetLocalOmgContext().user_input_dims.size()) {
    GELOGE(PARAM_INVALID, "Output count of %s is %zu, should be equal to count of input shape: %zu",
           op_desc->GetName().c_str(), data_count, GetLocalOmgContext().user_input_dims.size());
    return PARAM_INVALID;
  }
  for (size_t i = 0; i < data_count; ++i) {
    auto output_shape = data_node->GetOpDesc()->GetOutputDesc(i).GetShape().GetDims();
    auto dynamic_dims = GetLocalOmgContext().user_input_dims.at(i).second;
    if (output_shape.empty() && dynamic_dims.size() == 1 && dynamic_dims.at(0) == 0) {
      GELOGI("No need to check sequence for constant.");
      continue;
    }
    if (dynamic_dims.size() != output_shape.size()) {
      GELOGE(PARAM_INVALID, "the output_shape of %s is %s, the input_shape from options of %s is %s.",
             data_node->GetName().c_str(), formats::JoinToString(output_shape).c_str(),
             GetLocalOmgContext().user_input_dims.at(i).first.c_str(), formats::JoinToString(dynamic_dims).c_str());
      return PARAM_INVALID;
    }
    for (size_t j = 0; j < dynamic_dims.size(); ++j) {
      if (dynamic_dims.at(j) != kDynmaicDims && dynamic_dims.at(j) != output_shape.at(j)) {
        GELOGE(INTERNAL_ERROR, "value of input_shape %s should be equal to %s.",
               formats::JoinToString(dynamic_dims).c_str(), formats::JoinToString(output_shape).c_str());
        return INTERNAL_ERROR;
      }
    }
  }
  return SUCCESS;
}

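///
/// @ingroup ge
/// @brief Check the input sequence of the dynamic options against data or getnext nodes.
/// @param [in] ComputeGraphPtr &graph: graph to check.
/// @param [out] data_nodes / getnext_nosink_nodes / getnext_sink_nodes: classified input nodes.
/// @return SUCCESS / PARAM_INVALID
///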
Status CheckSequenceOfOptions(ComputeGraphPtr &graph, vector<NodePtr> &data_nodes,
                              vector<NodePtr> &getnext_nosink_nodes, vector<NodePtr> &getnext_sink_nodes) {
  if (GetLocalOmgContext().dynamic_node_type.empty()) {
    GELOGI("No need to CheckSequenceOfOptions.");
    return SUCCESS;
  }
  if (DistinguishGetNextAndData(graph, data_nodes, getnext_nosink_nodes, getnext_sink_nodes) != SUCCESS) {
    GELOGE(PARAM_INVALID, "DistinguishGetNextAndData failed.");
    return PARAM_INVALID;
  }
  if (GetLocalOmgContext().dynamic_node_type == DATA) {
    GELOGD("Users want data nodes to be dynamic.");
    if (CheckSequenceOfData(graph, data_nodes) != SUCCESS) {
      GELOGE(PARAM_INVALID, "Failed to check sequence of data nodes.");
      return PARAM_INVALID;
    }
  } else {
    GELOGD("Users want getnext nodes to be dynamic.");
    if (!getnext_nosink_nodes.empty()) {
      if (CheckSequenceOfData(graph, getnext_nosink_nodes) != SUCCESS) {
        GELOGE(PARAM_INVALID, "Failed to check sequence of getnext nosink nodes.");
        return PARAM_INVALID;
      }
    } else {
      if (CheckSequenceOfGetnext(graph, getnext_sink_nodes) != SUCCESS) {
        GELOGE(PARAM_INVALID, "Failed to check sequence of getnext sink nodes.");
        return PARAM_INVALID;
      }
    }
  }
  return SUCCESS;
}

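///
/// @ingroup ge
/// @brief Update the first value (name) of each user input shape entry with the corresponding data node's name.
/// @param [in] const vector<NodePtr> &data_nodes: data nodes providing the names.
/// @return SUCCESS / PARAM_INVALID
///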
Status UpdateNameOfData(ComputeGraphPtr &graph, const vector<NodePtr> &data_nodes) {
  GELOGD("Update first value of input shape by data nodes.");
  if (data_nodes.size() != GetLocalOmgContext().user_input_dims.size()) {
    GELOGE(PARAM_INVALID, "count of data_nodes: %zu should be equal to input_shape count: %zu.",
           data_nodes.size(), GetLocalOmgContext().user_input_dims.size());
    return PARAM_INVALID;
  }
  for (size_t i = 0; i < data_nodes.size(); ++i) {
    GELOGD("The %zu data name is %s.", i, data_nodes.at(i)->GetOpDesc()->GetName().c_str());
    GetLocalOmgContext().user_input_dims.at(i).first = data_nodes.at(i)->GetOpDesc()->GetName();
  }
  return SUCCESS;
}

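///
/// @ingroup ge
/// @brief Update the first value (name) of each user input shape entry from the getnext sink node's outputs.
/// @param [in] const vector<NodePtr> &getnext_sink_nodes: getnext sink nodes, only one is supported.
/// @return SUCCESS / PARAM_INVALID
///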
Status UpdateNameOfGetnext(ComputeGraphPtr &graph, const vector<NodePtr> &getnext_sink_nodes) {
  GELOGD("Update first value of input shape by getnext sink nodes.");
  if (getnext_sink_nodes.size() != kNumOfGetnextNode) {
    GELOGE(PARAM_INVALID, "Not support dynamic dims when a graph with multi getnext nodes.");
    return PARAM_INVALID;
  }
  auto input_node = getnext_sink_nodes.at(0);
  GE_CHECK_NOTNULL(input_node);
  auto op_desc = input_node->GetOpDesc();
  GE_CHECK_NOTNULL(op_desc);
  // The user wants getnext to be dynamic: the graph contains either getnext only, or data + getnext_sink.
  size_t data_count = input_node->GetAllOutDataAnchors().size() / kDivisionConst;
  if (data_count != GetLocalOmgContext().user_input_dims.size()) {
    GELOGE(PARAM_INVALID, "Output count of %s is %zu, should be equal to count of input shape: %zu",
           op_desc->GetName().c_str(), data_count, GetLocalOmgContext().user_input_dims.size());
    return PARAM_INVALID;
  }
  for (size_t i = 0; i < data_count; ++i) {
    string data_name = op_desc->GetName() + "_" + std::to_string(i);
    GELOGD("Data just from getnext sink is %s.", data_name.c_str());
    GetLocalOmgContext().user_input_dims.at(i).first = data_name;
  }
  return SUCCESS;
}

// Online and offline modes need to be distinguished: offline inference does not update the name of input_shape.
Status UpdateNameOfInputShape(ComputeGraphPtr &graph, const vector<NodePtr> &data_nodes,
                              const vector<NodePtr> &getnext_nosink_nodes, const vector<NodePtr> &getnext_sink_nodes) {
  if (GetLocalOmgContext().dynamic_node_type.empty()) {
    GELOGI("No need to update first value of input shape when offline infer.");
    return SUCCESS;
  }
  if (GetLocalOmgContext().dynamic_node_type == DATA) {
    GELOGD("Users want data nodes to be dynamic.");
    if (UpdateNameOfData(graph, data_nodes) != SUCCESS) {
      GELOGE(PARAM_INVALID, "Failed to update first value of input shape of data nodes.");
      return PARAM_INVALID;
    }
  } else {
    GELOGD("Users want getnext nodes to be dynamic.");
    if (!getnext_nosink_nodes.empty()) {
      if (UpdateNameOfData(graph, getnext_nosink_nodes) != SUCCESS) {
        GELOGE(PARAM_INVALID, "Failed to update first value of input shape of getnext nosink nodes.");
        return PARAM_INVALID;
      }
    } else {
      if (UpdateNameOfGetnext(graph, getnext_sink_nodes) != SUCCESS) {
        GELOGE(PARAM_INVALID, "Failed to update first value of input shape of getnext sink nodes.");
        return PARAM_INVALID;
      }
    }
  }
  return SUCCESS;
}

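///
/// @ingroup ge
/// @brief Remove Identity nodes that the adapter inserted after getnext nodes.
/// @param [in] ComputeGraphPtr &graph: graph to clean up.
/// @return SUCCESS / FAILED
///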
Status DeleteIdentityInsertByAdapter(ComputeGraphPtr &graph) {
  GELOGD("Start delete identity node inserted by adapter.");
  for (NodePtr &node : graph->GetDirectNode()) {
    GE_CHECK_NOTNULL(node);
    OpDescPtr op_desc = node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    if (IsGetNextType(node)) {
      for (auto &out_data_anchor : node->GetAllOutDataAnchors()) {
        GE_IF_BOOL_EXEC(out_data_anchor == nullptr, continue);
        for (auto &peer_in_anchor : out_data_anchor->GetPeerInDataAnchors()) {
          GE_IF_BOOL_EXEC(peer_in_anchor == nullptr, continue);
          auto dst_node = peer_in_anchor->GetOwnerNode();
          GE_IF_BOOL_EXEC(dst_node == nullptr, continue);
          if (dst_node->GetType() == IDENTITY) {
            GELOGI("Need to remove %s.", dst_node->GetName().c_str());
            if (ge::GraphUtils::RemoveNodeWithoutRelink(graph, dst_node) != GRAPH_SUCCESS) {
              GELOGE(FAILED, "Remove Identity node %s failed.", dst_node->GetName().c_str());
              return FAILED;
            }
          }
        }
      }
    }
  }
  return SUCCESS;
}

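///
/// @ingroup ge
/// @brief Check that each gear of dynamic_dims has as many values as there are -1 dims in the input shapes.
/// @param [in] const std::vector<std::vector<int64_t>> &shapes: gears parsed from dynamic_dims.
/// @return SUCCESS / PARAM_INVALID
///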
Status CheckNegativeCountOfOptions(const std::vector<std::vector<int64_t>> &shapes) {
  if (!GetLocalOmgContext().dynamic_dims.empty()) {
    size_t negative_count = 0;
    for (size_t i = 0; i < GetLocalOmgContext().user_input_dims.size(); ++i) {
      for (size_t j = 0; j < GetLocalOmgContext().user_input_dims.at(i).second.size(); ++j) {
        if (GetLocalOmgContext().user_input_dims.at(i).second.at(j) == kDynmaicDims) {
          negative_count++;
        }
      }
    }
    for (size_t i = 0; i < shapes.size(); ++i) {
      if (shapes.at(i).size() != negative_count) {
        GELOGE(PARAM_INVALID, "Each gear num of dynamic_dims is %zu should be equal to %zu.", shapes.at(i).size(),
               negative_count);
        return PARAM_INVALID;
      }
    }
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Init Dynamic Param from Options.
/// @param [out] std::vector<std::vector<int64_t>> &shapes: Result for Params.
/// @return true: Configured for Multi batch / false: Not configured for Multi batch.
///
bool InitDynamicParams(vector<vector<int64_t>> &shapes) {
  if (!GetLocalOmgContext().dynamic_batch_size.empty()) {
    GELOGD("Found dynamic batch option, value %s", GetLocalOmgContext().dynamic_batch_size.c_str());
    std::vector<std::string> dims = ge::StringUtils::Split(GetLocalOmgContext().dynamic_batch_size, ',');
    for (const auto &dim : dims) {
      if (dim.empty()) {
        continue;
      }
      shapes.emplace_back(std::vector<int64_t>({std::strtol(dim.c_str(), nullptr, kDecimal)}));
      GELOGI("Found dynamic batch, shape %s", formats::JoinToString(*shapes.rbegin()).c_str());
    }
  }
  if (!GetLocalOmgContext().dynamic_image_size.empty()) {
    GELOGD("Found dynamic image size option, value %s", GetLocalOmgContext().dynamic_image_size.c_str());
    ParseDynamicSize(GetLocalOmgContext().dynamic_image_size, shapes);
    for (const auto &shape : shapes) {
      GELOGI("Found dynamic image size, shape %s", formats::JoinToString(shape).c_str());
    }
  }
  if (!GetLocalOmgContext().dynamic_dims.empty()) {
    GELOGD("Found dynamic dims option, value %s", GetLocalOmgContext().dynamic_dims.c_str());
    ParseDynamicSize(GetLocalOmgContext().dynamic_dims, shapes);
    for (const auto &shape : shapes) {
      GELOGI("Found dynamic dims, shape %s", formats::JoinToString(shape).c_str());
    }
  }
  return !shapes.empty();
}

///
/// @ingroup ge
/// @brief Parse each data's own dynamic dims.
/// @param [in] const vector<vector<int64_t>> &shapes: dynamic gears parsed from options.
/// @param [in] vector<pair<string, vector<int64_t>>> &data_name_and_shape: each data's name and shape.
/// @param [out] map<string, vector<vector<int64_t>>> &data_to_dynamic_info: key:data_name. value:dynamic dims.
/// @return SUCCESS / FAILED
///
Status ParserDataToDynmaicInfo(const vector<vector<int64_t>> &shapes,
                               vector<pair<string, vector<int64_t>>> &data_name_and_shape,
                               map<string, vector<vector<int64_t>>> &data_to_dynamic_info) {
  size_t cur_data_index = 0;
  for (size_t index = 0; index < data_name_and_shape.size(); ++index) {
    auto &cur_item = data_name_and_shape[index];
    auto &data_name = cur_item.first;
    auto &data_shape = cur_item.second;
    auto dynamic_dims_num = std::count_if(data_shape.begin(), data_shape.end(),
                                          [&data_shape](int64_t dim) { return dim < 0; });
    GELOGI("Train_Dynamic dynamic_dims_num of %s is %zu", data_name.c_str(), dynamic_dims_num);
    vector<vector<int64_t>> dynamic_info;
    for (auto &dynamic_gear_info : shapes) {
      GELOGI("Train_Dynamic dynamic_gear_info is %s", formats::JoinToString(dynamic_gear_info).c_str());
      vector<int64_t> one_gear;
      if (dynamic_gear_info.size() == static_cast<size_t>(dynamic_dims_num)) {
        one_gear = dynamic_gear_info;
      } else if (dynamic_gear_info.size() > static_cast<size_t>(dynamic_dims_num)) {
        auto tmp_index = cur_data_index;
        for (size_t i = 0; i < static_cast<size_t>(dynamic_dims_num); ++i) {
          if (tmp_index >= dynamic_gear_info.size()) {
            ErrorManager::GetInstance().ATCReportErrMessage(
                "E10045", {"name", "shape"}, {data_name, formats::JoinToString(data_shape)});
            GELOGE(PARAM_INVALID, "Data: %s shape: %s make dynamic dims overflow", data_name.c_str(),
                   formats::JoinToString(data_shape).c_str());
            return FAILED;
          }
          one_gear.push_back(dynamic_gear_info[tmp_index++]);
        }
      } else {
        ErrorManager::GetInstance().ATCReportErrMessage(
            "E10046", {"name", "shape"}, {data_name, formats::JoinToString(data_shape)});
        GELOGE(PARAM_INVALID, "Dynamic dims num of data: %s shape: %s can not be more than one gear dynamic info size",
               data_name.c_str(), formats::JoinToString(data_shape).c_str());
        return FAILED;
      }
      GELOGI("Train_Dynamic one_gear is %s.", formats::JoinToString(one_gear).c_str());
      dynamic_info.push_back(one_gear);
    }
    cur_data_index += dynamic_dims_num;
    data_to_dynamic_info[data_name] = dynamic_info;
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Check whether the dynamic params are valid.
/// @param [in] const vector<vector<int64_t>> &shapes: Params for check.
/// @return SUCCESS: valid / PARAM_INVALID: invalid.
///
Status CheckDynamicParams(const vector<vector<int64_t>> &shapes) {
  if (shapes.size() < kMinShapesCount) {
    ErrorManager::GetInstance().ATCReportErrMessage(
        "E10035", {"shapesize", "minshapesize"}, {std::to_string(shapes.size()), std::to_string(kMinShapesCount - 1)});
    GELOGE(PARAM_INVALID,
           "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims]'s "
           "value size [%zu] must be greater than [%zu].",
           shapes.size(), kMinShapesCount - 1);
    return PARAM_INVALID;
  }
  if (shapes.size() > kMaxShapesCount) {
    ErrorManager::GetInstance().ATCReportErrMessage(
        "E10036", {"shapesize", "maxshapesize"}, {std::to_string(shapes.size()), std::to_string(kMaxShapesCount + 1)});
    GELOGE(PARAM_INVALID,
           "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims]'s "
           "value size [%zu] must be less than [%zu].",
           shapes.size(), kMaxShapesCount + 1);
    return PARAM_INVALID;
  }
  std::set<std::vector<int64_t>> shapes_set;
  size_t shape_size = shapes.at(0).size();
  for (auto &shape : shapes) {
    if (shape_size != shape.size()) {
      ErrorManager::GetInstance().ATCReportErrMessage("E10037", {"shapesize1", "shapesize2"},
                                                      {std::to_string(shape_size), std::to_string(shape.size())});
      GELOGE(PARAM_INVALID,
             "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims]'s "
             "value size must be same, first group's size is %zu and another's is %zu.",
             shape_size, shape.size());
      return PARAM_INVALID;
    }
    for (auto dim : shape) {
      if (dim <= 0) {
        ErrorManager::GetInstance().ATCReportErrMessage("E10038", {"dim"}, {std::to_string(dim)});
        GELOGE(PARAM_INVALID, "Invalid dim %ld, all dims must be greater than 0", dim);
        return PARAM_INVALID;
      }
    }
    shapes_set.insert(shape);
  }
  if (shapes_set.size() != shapes.size()) {
    ErrorManager::GetInstance().ATCReportErrMessage("E10039");
    GELOGE(PARAM_INVALID,
           "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims] exist duplicate shapes.");
    return PARAM_INVALID;
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Get GeShape from configured shape.
/// @param [in] const std::vector<int64_t> &batch_shape: Configured shape.
/// @param [out] GeShape &data_shape: GeShape for configured shape.
/// @return SUCCESS / PARAM_INVALID
///
Status CalcShape(const std::vector<int64_t> &batch_shape, GeShape &data_shape) {
  size_t batch_shape_index = 0;
  for (size_t i = 0; i < data_shape.GetDimNum(); ++i) {
    if (data_shape.GetDim(i) < 0) {
      if (batch_shape_index >= batch_shape.size()) {
        ErrorManager::GetInstance().ATCReportErrMessage(
            "E19012", {"function", "reason"},
            {"CalcShape", "the batch shape count " + std::to_string(batch_shape.size()) +
                          " does not match the data shape " + data_shape.ToString()});
        GELOGE(PARAM_INVALID,
               "Failed to calc tensor shape, the batch shape count %zu, does not match the data shape %s",
               batch_shape.size(), data_shape.ToString().c_str());
        return PARAM_INVALID;
      }
      data_shape.SetDim(i, batch_shape[batch_shape_index++]);
    }
  }
  GELOGI("CalcShape size of batch_shape is %zu, batch_shape_index is %zu.", batch_shape.size(), batch_shape_index);
  if (batch_shape_index != batch_shape.size()) {
    ErrorManager::GetInstance().ATCReportErrMessage(
        "E19012", {"function", "reason"}, {"CalcShape", "the batch shape count " + std::to_string(batch_shape.size()) +
                                           " does not match the data shape " + data_shape.ToString()});
    GELOGE(PARAM_INVALID, "Failed to calc tensor shape, the batch shape count %zu, does not match the data shape %s",
           batch_shape.size(), data_shape.ToString().c_str());
    return PARAM_INVALID;
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Set mbatch_dynamic_type on node.
/// @param [in] const OpDescPtr &op_desc: Node for set attribute.
/// @return 0: SUCCESS / others: INTERNAL_ERROR
///
Status StampDynamicType(const OpDescPtr &op_desc) {
  GE_CHECK_NOTNULL(op_desc);
  int32_t dynamic_type = static_cast<int32_t>(FIXED);
  if (!GetLocalOmgContext().dynamic_batch_size.empty()) {
    dynamic_type = static_cast<int32_t>(DYNAMIC_BATCH);
  }
  if (!GetLocalOmgContext().dynamic_image_size.empty()) {
    dynamic_type = static_cast<int32_t>(DYNAMIC_IMAGE);
  }
  if (!GetLocalOmgContext().dynamic_dims.empty()) {
    dynamic_type = static_cast<int32_t>(DYNAMIC_DIMS);
  }
  if (!AttrUtils::SetInt(op_desc, ATTR_DYNAMIC_TYPE, dynamic_type)) {
    GELOGE(INTERNAL_ERROR, "Failed to add dynamic type attr for node %s", op_desc->GetName().c_str());
    return INTERNAL_ERROR;
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Check dynamic batch shape.
/// @param [in] const vector<int64_t> &shape: data_shape to be checked.
/// @param [in] const string &data_name: cur data name.
/// @return true: valid / false: invalid
///
bool CheckDynamicBatchShape(const vector<int64_t> &shape, const string &data_name) {
  if (shape[0] == kDynmaicDims) {
    for (size_t i = 1; i < shape.size(); ++i) {
      if (shape[i] < 1) {
        ErrorManager::GetInstance().ATCReportErrMessage("E10018", {"index", "shape"},
                                                        {std::to_string(i), std::to_string(shape[i])});
        GELOGE(ge::PARAM_INVALID,
               "Only batch N can be -1 when set --dynamic_batch_size, current data: %s shape[%zu] is %ld",
               data_name.c_str(), i, shape[i]);
        return false;
      }
    }
    return true;
  } else {
    return false;
  }
}

///
/// @ingroup ge
/// @brief Check dynamic image size shape.
/// @param [in] const vector<int64_t> &shape: data_shape to be checked.
/// @param [in] const string &data_name: cur data name.
/// @param [in] const std::string &input_format: format of input.
/// @return true: valid / false: invalid
///
bool CheckDynamicImageSizeShape(const vector<int64_t> &shape, const string &data_name,
                                const std::string &input_format) {
  int64_t height = 0;
  int64_t width = 0;
  if (input_format == "NCHW") {
    height = shape[NCHW_DIM_H];
    width = shape[NCHW_DIM_W];
  }
  if (input_format == "NHWC") {
    height = shape[NHWC_DIM_H];
    width = shape[NHWC_DIM_W];
  }
  if (height == kDynmaicDims && width == kDynmaicDims &&
      std::count(shape.begin(), shape.end(), kDynmaicDims) == kDynamicImgSizeDynamciDimsNum) {
    return true;
  } else {
    ErrorManager::GetInstance().ATCReportErrMessage("E10019");
    GELOGE(ge::PARAM_INVALID,
           "--input_shape's shape is invalid, only height and width can be -1 when set --dynamic_image_size.");
    return false;
  }
}
}  // namespace multibatch
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit the processor's compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core, as shown in the detailed architecture diagram below.