| SHA1 | Message | Date |
|---|---|---|
| ad3e78974a | !289 dts: onnx parser dim value set -1 when model is dynamic (Merge pull request !289 from zhengyuanhua/r1.3.0) | 4 years ago |
| 3b74120792 | dts: onnex parser dim value set -1 when model is dynamic | 4 years ago |
| d4587c1c33 | !274 fixed sc warning (Merge pull request !274 from 李磊/sc) | 4 years ago |
| 30de9553b1 | fixed warn check | 4 years ago |
| fda1f22e88 | !273 no need change name for one to many (Merge pull request !273 from yangyongqiang/r1.3.0) | 4 years ago |
| e2d08da811 | no need change name for one to many | 4 years ago |
| d851e1d467 | !267 update submodule index (Merge pull request !267 from 李磊/r1.3.0) | 4 years ago |
| 6d5c0468a9 | !269 update owners (Merge pull request !269 from 王涛/r1.3.0) | 4 years ago |
| a1ee0a3078 | update OWNERS. | 4 years ago |
| 48050716b3 | update submodule | 4 years ago |
| b203d47837 | !260 update submodule (Merge pull request !260 from 王涛/r1.3.0) | 4 years ago |
| 7bd2137e01 | update submodule | 4 years ago |
```diff
@@ -1,4 +1,4 @@
 [submodule "metadef"]
 	path = metadef
 	url = https://gitee.com/ascend/metadef.git
-	branch = master
+	branch = r1.3.0
```
```diff
@@ -2,6 +2,10 @@ approvers:
 - ji_chen
 - wqtshg
 - ljl0711
+- startzgf168
+- lbisdaddy
+- andylhy
+- liyihan123
 reviewers:
 - xchu42
 - sheng-nan
```
```diff
@@ -1 +1 @@
-Subproject commit deebd59d7ea015d7907db525596213492fe021b0
+Subproject commit e68940202b874ccec77d621f59b34fc4404bede2
```
```diff
@@ -29,7 +29,21 @@
 namespace ge {
 namespace {
-Status HandleNewOp(const NodePtr &node, const ComputeGraphPtr &compute_graph, const NodePtr &new_node) {
+bool HasOneNonDataNode(const ComputeGraphPtr &graph) {
+  GE_CHECK_NOTNULL(graph);
+  int32_t non_data_nums = 0;
+  for (const auto& n : graph->GetDirectNode()) {
+    if (n->GetType() != parser::DATA) {
+      non_data_nums++;
+    }
+  }
+  GELOGD("graph has non data node num is %d", non_data_nums);
+  return (non_data_nums == 1);
+}
+Status HandleNewOp(const NodePtr &node,
+                   const ComputeGraphPtr &compute_graph,
+                   const NodePtr &new_node,
+                   bool no_need_change_name) {
   GE_CHECK_NOTNULL(node);
   GE_CHECK_NOTNULL(new_node);
   if (new_node->SetOwnerComputeGraph(compute_graph) != GRAPH_SUCCESS) {
@@ -37,8 +51,13 @@ Status HandleNewOp(const NodePtr &node, const ComputeGraphPtr &compute_graph, co
     return FAILED;
   }
   auto op_desc = new_node->GetOpDesc();
-  static std::atomic_long new_node_index(0);
-  auto new_name = "PartitionedCall_" + new_node->GetName() + "_" + to_string(new_node_index++);
+  string new_name;
+  if (no_need_change_name) {
+    new_name = node->GetName();
+  } else {
+    static std::atomic_long new_node_index(0);
+    new_name = "PartitionedCall_" + new_node->GetName() + "_" + to_string(new_node_index++);
+  }
   op_desc->SetName(new_name);
   bool ret = ge::AttrUtils::SetListStr(op_desc,
                                        ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES,
@@ -91,11 +110,12 @@ Status ParserUtils::ExpandNodeToSubgraph(const Graph &subgraph, const NodePtr &n
   GE_CHECK_NOTNULL(compute_graph);
   // add subgraph node to graph.
+  bool no_need_change_name = HasOneNonDataNode(sub_compute_graph);
   std::vector<NodePtr> input_nodes;
   for (const auto &n : sub_compute_graph->GetDirectNode()) {
     auto new_node = compute_graph->AddNode(n);
     GE_CHECK_NOTNULL(new_node);
-    if (HandleNewOp(node, compute_graph, new_node) != SUCCESS) {
+    if (HandleNewOp(node, compute_graph, new_node, no_need_change_name) != SUCCESS) {
       GELOGE(FAILED, "Handle new op[%s] for node[%s] failed.", new_node->GetName().c_str(), node->GetName().c_str());
       return FAILED;
     }
```
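The hunks above implement the "no need change name for one to many" commit: when the subgraph that replaces a node contains exactly one non-Data node, the copied node keeps the original node's name, and only a genuine one-to-many expansion falls back to generated `PartitionedCall_*` names. Below is a minimal, standalone C++ sketch of that decision; `FakeNode` and `ChooseName` are hypothetical stand-ins, not the real `ge::NodePtr`/`ge::ComputeGraphPtr` APIs.

```cpp
// Standalone sketch of the naming rule in the diff above (FakeNode and
// ChooseName are hypothetical; the real code walks a ComputeGraph and
// compares node types against parser::DATA).
#include <atomic>
#include <iostream>
#include <string>
#include <vector>

struct FakeNode {
  std::string name;
  std::string type;
};

// True when the expanded subgraph has exactly one node that is not a Data input.
bool HasOneNonDataNode(const std::vector<FakeNode> &graph) {
  int non_data_nums = 0;
  for (const auto &n : graph) {
    if (n.type != "Data") {
      ++non_data_nums;
    }
  }
  return non_data_nums == 1;
}

// Keep the original node name for a one-to-one expansion; otherwise generate
// a unique "PartitionedCall_" name from an atomic counter, as in the diff.
std::string ChooseName(const std::string &original_name,
                       const std::string &sub_node_name,
                       bool no_need_change_name) {
  if (no_need_change_name) {
    return original_name;
  }
  static std::atomic_long new_node_index(0);
  return "PartitionedCall_" + sub_node_name + "_" + std::to_string(new_node_index++);
}

int main() {
  std::vector<FakeNode> subgraph = {{"x", "Data"}, {"conv", "Conv2D"}};
  const bool keep = HasOneNonDataNode(subgraph);              // true: single compute node
  std::cout << ChooseName("my_conv", "conv", keep) << "\n";   // prints "my_conv"
  return 0;
}
```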
```diff
@@ -1,5 +1,5 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/bin/env python
+# coding: utf-8
 #-------------------------------------------------------------------
 # Purpose:
 # Copyright 2020 Huawei Technologies Co., Ltd. All rights reserved.
```
```diff
@@ -148,6 +148,7 @@
 };
 const char* const MATMULV2 = "MatMulV2";
 const std::vector<std::string> kNoNeedUpdateFormat = {MATMULV2};
+const int64_t kDimValue = 1;
 }
 Status OnnxModelParser::ParseInput(ge::onnx::GraphProto &onnx_graph,
@@ -182,7 +183,10 @@ Status OnnxModelParser::ParseInput(ge::onnx::GraphProto &onnx_graph,
   const ge::onnx::TensorShapeProto tensor_shape = type_proto_tensor.shape();
   for (int j = 0; j < tensor_shape.dim_size(); j++) {
     const ge::onnx::TensorShapeProto_Dimension dimension = tensor_shape.dim(j);
-    int64_t dim_value = dimension.dim_value();
+    int64_t dim_value = -1;
+    if (dimension.value_case() == kDimValue) {
+      dim_value = dimension.dim_value();
+    }
     tensor_tmp.add_dims(dim_value);
     GELOGI("elem_type: %d, dim_value: %ld", elem_type, dim_value);
   }
```
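For context on the ONNX hunk above: in `TensorShapeProto.Dimension`, `dim_value` (field 1) and `dim_param` (field 2) form a protobuf oneof, and `value_case()` reports which member is set, which is what the new `kDimValue = 1` constant is compared against. A symbolic (dynamic) dimension sets `dim_param`, so reading `dim_value()` directly would silently return the protobuf default `0`; the fix records `-1` instead. The following is a minimal standalone sketch with a mock type, not the generated `ge::onnx` protobuf class.

```cpp
// Mock illustration of the dim-value handling above; MockDimension is a
// hypothetical stand-in for the generated TensorShapeProto_Dimension class.
#include <cstdint>
#include <iostream>
#include <string>

struct MockDimension {
  // Mirrors the oneof case numbers: 1 == dim_value, 2 == dim_param.
  enum ValueCase { kNotSet = 0, kDimValue = 1, kDimParam = 2 };
  ValueCase value_case_ = kNotSet;
  int64_t dim_value_ = 0;      // protobuf default when dim_value is not set
  std::string dim_param_;

  ValueCase value_case() const { return value_case_; }
  int64_t dim_value() const { return dim_value_; }
};

// Returns the concrete dimension if one is set, otherwise -1 for a dynamic dim.
int64_t ResolveDim(const MockDimension &dimension) {
  int64_t dim_value = -1;
  if (dimension.value_case() == MockDimension::kDimValue) {
    dim_value = dimension.dim_value();
  }
  return dim_value;
}

int main() {
  MockDimension fixed{MockDimension::kDimValue, 224, ""};
  MockDimension dynamic{MockDimension::kDimParam, 0, "batch"};  // symbolic dim
  std::cout << ResolveDim(fixed) << "\n";    // 224
  std::cout << ResolveDim(dynamic) << "\n";  // -1
  return 0;
}
```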