
codecheck clean

pull/418/head
isaacxr · 3 years ago
commit d3df886adb
12 changed files with 34 additions and 40 deletions
  1. +0  -1   inc/external/parser/caffe_parser.h
  2. +0  -1   inc/external/parser/onnx_parser.h
  3. +0  -1   inc/external/parser/tensorflow_parser.h
  4. +1  -1   parser/caffe/caffe_custom_parser_adapter.cc
  5. +1  -1   parser/caffe/caffe_custom_parser_adapter.h
  6. +1  -1   parser/onnx/onnx_parser.cc
  7. +2  -2   parser/tensorflow/tensorflow_fusion_op_parser.h
  8. +13 -14  parser/tensorflow/tensorflow_parser.cc
  9. +7  -7   parser/tensorflow/tensorflow_parser.h
 10. +1  -1   parser/tensorflow/tensorflow_parser_register.h
 11. +5  -7   parser/tensorflow/tensorflow_util.cc
 12. +3  -3   parser/tensorflow/tensorflow_util.h

+0 -1   inc/external/parser/caffe_parser.h

@@ -38,7 +38,6 @@
 #include "graph/ascend_string.h"
 #include "graph/ge_error_codes.h"
 #include "graph/graph.h"
-#include "graph/types.h"
 
 namespace ge {
 PARSER_FUNC_VISIBILITY graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file,


+0 -1   inc/external/parser/onnx_parser.h

@@ -34,7 +34,6 @@
 #include "graph/ascend_string.h"
 #include "graph/ge_error_codes.h"
 #include "graph/graph.h"
-#include "graph/types.h"
 
 namespace ge {
 PARSER_FUNC_VISIBILITY graphStatus aclgrphParseONNX(const char *model_file,


+0 -1   inc/external/parser/tensorflow_parser.h

@@ -39,7 +39,6 @@
 #include "graph/ascend_string.h"
 #include "graph/ge_error_codes.h"
 #include "graph/graph.h"
-#include "graph/types.h"
 
 namespace ge {
 PARSER_FUNC_VISIBILITY graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph);


+1 -1   parser/caffe/caffe_custom_parser_adapter.cc

@@ -59,7 +59,7 @@ Status CaffeCustomParserAdapter::ParseParams(const Message *op_src, ge::OpDescPt
   return SUCCESS;
 }
 
-Status CaffeCustomParserAdapter::ParseParams(const Operator &op_src, ge::OpDescPtr &op_dest) {
+Status CaffeCustomParserAdapter::ParseParams(const Operator &op_src, const ge::OpDescPtr &op_dest) {
   GELOGI("Caffe custom op begin to params: layer name = %s, layer type= %s ", op_src.GetName().c_str(),
          op_src.GetOpType().c_str());
   GE_CHECK_NOTNULL(op_dest);


+1 -1   parser/caffe/caffe_custom_parser_adapter.h

@@ -42,7 +42,7 @@ class PARSER_FUNC_VISIBILITY CaffeCustomParserAdapter : public CaffeOpParser {
   * @return FAILED parse failed
   * @author
   */
-  static Status ParseParams(const Operator &op_src, ge::OpDescPtr &op_dest);
+  static Status ParseParams(const Operator &op_src, const ge::OpDescPtr &op_dest);
 
   /**
   * @ingroup domi_omg
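Both ParseParams hunks above pass the ge::OpDescPtr (a shared_ptr alias) by const reference instead of by non-const reference. A minimal sketch of the effect, using a hypothetical OpDesc stand-in rather than the real GE type:

#include <memory>
#include <string>

// Hypothetical stand-in for ge::OpDesc, only to illustrate the signature change.
struct OpDesc { std::string name; };
using OpDescPtr = std::shared_ptr<OpDesc>;

// const OpDescPtr &: no reference-count churn at the call site, and the callee
// cannot reseat the caller's pointer, while the pointed-to OpDesc stays mutable.
static void ParseParams(const OpDescPtr &op_dest) {
  op_dest->name = "parsed";                // allowed: the pointee is non-const
  // op_dest = std::make_shared<OpDesc>(); // would not compile: the pointer is const
}

int main() {
  OpDescPtr op = std::make_shared<OpDesc>();
  ParseParams(op);
  return 0;
}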


+1 -1   parser/onnx/onnx_parser.cc

@@ -956,7 +956,7 @@ Status OnnxModelParser::ModelParseToGraphImpl(bool is_subgraph, ge::onnx::GraphP
     return ret;
   }
   // root graph needn't set outputs.
-  if(is_subgraph) {
+  if (is_subgraph) {
     graph.SetOutputs(output_ops);
   }




+2 -2   parser/tensorflow/tensorflow_fusion_op_parser.h

@@ -67,11 +67,11 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionOpParser : public TensorFlowOpParse
   *
   */
   // template <class T>
-  static Status ParseParamFromConst(const NodeDef *input_const, int32_t &param);
+  static Status ParseParamFromConst(const NodeDef *node_def, int32_t &param);
 
   static Status ParseParamFromConst(const NodeDef *node_def, int32_t &param, int index);
 
-  static Status ParseParamFromConst(const NodeDef *input_const, float &param);
+  static Status ParseParamFromConst(const NodeDef *node_def, float &param);
 
   static Status ParseParamFromConst(const NodeDef *node_def, float &param, int index);




+13 -14  parser/tensorflow/tensorflow_parser.cc

@@ -94,8 +94,7 @@ graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph) {
 
   // load custom plugin so and proto
   AclGrphParseUtil acl_graph_parse_util;
-  domi::Status status = acl_graph_parse_util.AclParserInitialize(options);
-  if (status != domi::SUCCESS) {
+  if (acl_graph_parse_util.AclParserInitialize(options) != domi::SUCCESS) {
     GELOGE(GRAPH_FAILED, "Parser Initialize failed.");
     return GRAPH_FAILED;
   }
@@ -241,8 +240,7 @@ Status GenSubgraphParseTasks(const ge::ComputeGraphPtr &parent_graph, std::deque
     if (ret != SUCCESS) {
       REPORT_CALL_ERROR("E19999", "Set subgraph:%s to node:%s(%s) failed, index:%u", subgraph_iname.c_str(),
                         node->GetName().c_str(), node->GetType().c_str(), i);
-      GELOGE(ret, "Failed to set subgraph %s to node %s index %u", subgraph_iname.c_str(), node->GetName().c_str(),
-             i);
+      GELOGE(ret, "Set subgraph %s to node %s failed, index %u", subgraph_iname.c_str(), node->GetName().c_str(), i);
       return ret;
     }
 
@@ -356,7 +354,8 @@ Status MappingAndAddSubGraph(const NodePtr &node, const Graph &graph, const Comp
  * @param [out] op: result of PartitionedCall OpDesc.
  * @return 0: SUCCESS / Others: FAILED
  */
-Status TensorFlowModelParser::DefunToPartitionedCall(const domi::tensorflow::NodeDef *node_def, ge::OpDescPtr &op) {
+Status TensorFlowModelParser::DefunToPartitionedCall(const domi::tensorflow::NodeDef *node_def,
+                                                     ge::OpDescPtr &op) const {
   const string op_name = node_def->name();
   domi::tensorflow::AttrValue attr_call_inference;
   if (!ge::TensorFlowUtil::FindAttrValue(node_def, "_disable_call_shape_inference", attr_call_inference)) {
@@ -617,7 +616,7 @@ void TensorFlowModelParser::GetInputOutputTensorNum(const ge::OpDescPtr &op_desc
   output_tensor_num = max_anchor_index + 1;
 }
 
-Status TensorFlowModelParser::CheckoutInputNum(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node) {
+Status TensorFlowModelParser::CheckoutInputNum(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node) const {
   GE_CHECK_NOTNULL(node);
   GE_CHECK_NOTNULL(op_desc);
 
@@ -695,7 +694,8 @@ void TensorFlowModelParser::UpdateOutputTensor(ge::OpDescPtr &op_desc, const std
   }
 }
 
-Status TensorFlowModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node) {
+Status TensorFlowModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc,
+                                                    const domi::tensorflow::NodeDef *node) const {
   GE_CHECK_NOTNULL(node);
   GE_CHECK_NOTNULL(op_desc);
   // get input and output attr from tensorflow
@@ -2654,10 +2654,9 @@ Status TensorFlowModelParser::GraphDefOptimizeSnapShot(domi::tensorflow::GraphDe
   return SUCCESS;
 }
 
-Status TensorFlowModelParser::SetDestNodeName(domi::tensorflow::NodeDef *const node_current,
-                                              domi::tensorflow::NodeDef *const node_dest,
-                                              const int32_t input_idx, const bool is_control,
-                                              bool &clear_input_flag) {
+Status TensorFlowModelParser::SetDestNodeName(const domi::tensorflow::NodeDef *const node_current,
+                                              domi::tensorflow::NodeDef *const node_dest, const int32_t input_idx,
+                                              const bool is_control, bool &clear_input_flag) {
   GELOGI("current node name is %s ", node_current->name().c_str());
   clear_input_flag = true;
   if (is_control) {
@@ -2703,7 +2702,7 @@ void TensorFlowModelParser::OptimizeDestroyTemporaryVariable(domi::tensorflow::G
       if (nodeDstInputNameTmp != nodeCurrent->name()) {
         continue;
       }
-      if (SetDestNodeName(nodeCurrent, nodeDst, k, isControl, clearInputFlag) !=SUCCESS) {
+      if (SetDestNodeName(nodeCurrent, nodeDst, k, isControl, clearInputFlag) != SUCCESS) {
        GELOGE(FAILED, "CheckInputNodeName failed, node is: %s", nodeCurrent->name().c_str());
        return;
      }
@@ -3599,7 +3598,7 @@ Status TensorFlowModelParser::RemoveIsolateNode(domi::tensorflow::GraphDef *grap
 }
 
 Status TensorFlowModelParser::RecordFusionResult(const std::shared_ptr<ge::ScopeGraph> &scope_graph,
-                                                 const domi::tensorflow::NodeDef *node, ge::OpDescPtr &op_desc) {
+                                                 const domi::tensorflow::NodeDef *node, const ge::OpDescPtr &op_desc) {
   // The caller guarantees that the pointer is not null
   GELOGI("RecordFusionResult for %s start.", op_desc->GetName().c_str());
   auto &impl_scope_graph = scope_graph->impl_;
@@ -3994,7 +3993,7 @@ void TensorFlowModelParser::DumpAllNodeContext(const string &phase) const {
   }
 }
 
-Status TensorFlowModelParser::CheckAndUpdateInputDesc(ge::ComputeGraphPtr &compute_graph) {
+Status TensorFlowModelParser::CheckAndUpdateInputDesc(const ge::ComputeGraphPtr &compute_graph) {
   GE_CHECK_NOTNULL(compute_graph);
   for (auto &node : compute_graph->GetDirectNode()) {
     auto op_desc = node->GetOpDesc();


+7 -7   parser/tensorflow/tensorflow_parser.h

@@ -77,7 +77,7 @@ struct DelTransposeInfo;
 class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
  public:
   TensorFlowModelParser() {}
-  ~TensorFlowModelParser() {}
+  ~TensorFlowModelParser() override {}
 
   /**
   * @ingroup domi_omg
@@ -459,7 +459,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
   Status OptimizeSnapShot(domi::tensorflow::NodeDef *curr_mode_def, map<string, NodeDef *> &nodedef_map,
                           const std::pair<string, int> &input_data, const std::vector<string> &control_list);
 
-  static Status SetDestNodeName(domi::tensorflow::NodeDef *const node_current,
+  static Status SetDestNodeName(const domi::tensorflow::NodeDef *const node_current,
                                 domi::tensorflow::NodeDef *const node_dest, const int32_t input_idx,
                                 const bool is_control, bool &clear_input_flag);
 
@@ -503,8 +503,8 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
                                         domi::tensorflow::GraphDef *const output_graph_def);
   static string NodeNameFromInput(const string &input_name);
 
-  Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node);
-  Status CheckoutInputNum(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node);
+  Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node) const;
+  Status CheckoutInputNum(ge::OpDescPtr &op_desc, const domi::tensorflow::NodeDef *node) const;
   static void UpdateInputTensor(ge::OpDescPtr &op_desc, const std::vector<ge::GeTensorDesc> &input_desc,
                                 const size_t input_tensor_num);
   static void UpdateOutputTensor(ge::OpDescPtr &op_desc, const std::vector<ge::GeTensorDesc> &output_desc,
@@ -527,7 +527,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
   * @param [out] op: result of PartitionedCall OpDesc.
   * @return 0: SUCCESS / Others: FAILED
   */
-  Status DefunToPartitionedCall(const domi::tensorflow::NodeDef *node_def, ge::OpDescPtr &op);
+  Status DefunToPartitionedCall(const domi::tensorflow::NodeDef *node_def, ge::OpDescPtr &op) const;
 
   /**
   * @ingroup domi_omg
@@ -604,7 +604,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
   Status RemoveIsolateNode(domi::tensorflow::GraphDef *graph_def);
   static Status RecordFusionResult(const std::shared_ptr<ge::ScopeGraph> &scope_graph,
                                    const domi::tensorflow::NodeDef *node,
-                                   ge::OpDescPtr &op_desc);
+                                   const ge::OpDescPtr &op_desc);
 
   static Status GetFunctionProto(const string &file, domi::tensorflow::GraphDefLibrary &graph_def_library);
 
@@ -646,7 +646,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
 
   static Status ParseOpParams(const domi::tensorflow::NodeDef *node_def, ge::OpDescPtr &op,
                               const shared_ptr<OpParser> &op_parser);
-  static Status CheckAndUpdateInputDesc(ge::ComputeGraphPtr &compute_graph);
+  static Status CheckAndUpdateInputDesc(const ge::ComputeGraphPtr &compute_graph);
   static Status UpdateOutputsInfo(const ParserUtils::OutputMapping &final_output_nodes);
   static Status AddExternalGraph(const ComputeGraphPtr &root_graph);
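The header hunks above mark the destructor override and qualify member functions that do not modify parser state as const. A minimal sketch of those two codecheck fixes on hypothetical types (not the real GE classes):

#include <string>

struct NodeDef { std::string name; };

class ModelParser {
 public:
  virtual ~ModelParser() = default;
};

class TensorFlowishParser : public ModelParser {
 public:
  // override: the compiler verifies this really overrides a base-class virtual
  // and will flag a mismatch if the base declaration ever changes.
  ~TensorFlowishParser() override = default;

  // const member function: callable through a const object or reference, and any
  // accidental write to member state inside the body is rejected at compile time.
  std::string Describe(const NodeDef *node_def) const {
    return prefix_ + node_def->name;
  }

 private:
  std::string prefix_ = "tf:";
};

int main() {
  TensorFlowishParser parser;
  const NodeDef node{"conv"};
  return parser.Describe(&node).empty() ? 1 : 0;
}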




+1 -1   parser/tensorflow/tensorflow_parser_register.h

@@ -78,7 +78,7 @@ class PARSER_FUNC_VISIBILITY TensorflowParserBuilder : public TensorflowWeightPa
     }
     // register to OpParserFactory
     OpParserRegisterar registerar __attribute__((unused)) = OpParserRegisterar(
-        domi::TENSORFLOW, davinci_optype_, [=] { return std::shared_ptr<OpParser>(op_parser_adapter); });
+        domi::TENSORFLOW, davinci_optype_, [op_parser_adapter] { return std::shared_ptr<OpParser>(op_parser_adapter); });
     return true;
   }
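The hunk above replaces the blanket [=] capture with an explicit capture of op_parser_adapter. A minimal sketch of the difference on hypothetical types:

#include <functional>
#include <iostream>
#include <memory>

struct OpParser {
  void Parse() const { std::cout << "parse\n"; }
};

// Capturing the shared_ptr by name documents exactly what the closure copies and
// keeps alive; a blanket [=] would also copy every other local used in the body
// (and, inside a member function, capture this), which codecheck tools flag.
std::function<std::shared_ptr<OpParser>()> MakeCreator(std::shared_ptr<OpParser> adapter) {
  return [adapter] { return adapter; };
}

int main() {
  const auto creator = MakeCreator(std::make_shared<OpParser>());
  creator()->Parse();
  return 0;
}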




+5 -7   parser/tensorflow/tensorflow_util.cc

@@ -121,7 +121,8 @@ const uint32_t TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG = 2;
 
 using AttrValueMap = ::google::protobuf::Map<std::string, domi::tensorflow::AttrValue>;
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool TensorFlowUtil::FindAttrValue(
-    const domi::tensorflow::NodeDef *node_def, const std::string &attr_name, domi::tensorflow::AttrValue &attr_value) {
+    const domi::tensorflow::NodeDef *const node_def, const std::string &attr_name,
+    domi::tensorflow::AttrValue &attr_value) {
   GE_CHECK_NOTNULL(node_def);
   const google::protobuf::Map<std::string, domi::tensorflow::AttrValue> &attr = node_def->attr();
   const google::protobuf::Map<std::string, domi::tensorflow::AttrValue>::const_iterator it = attr.find(attr_name);
@@ -250,12 +251,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool TensorFlowUtil::ParseFromA
 }
 
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::TransTensorDescriptor(
-    const domi::tensorflow::AttrValue &attr_value, ParserOperator *op, const uint32_t io, const std::string &type) {
+    const domi::tensorflow::AttrValue &attr_value, ParserOperator *const op,
+    const uint32_t io, const std::string &type) {
   GE_CHECK_NOTNULL(op);
   if (!attr_value.has_list()) {
     return PARAM_INVALID;
   }
-
   std::vector<int32_t> tf_in_type;
   std::vector<int32_t> tf_out_type;
   // list contain many TensorDescriptors
@@ -265,10 +266,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
     int32_t tf_datatype = 0;
     GE_CHK_BOOL_RET_STATUS(ParseFromAttrValueList(ge_desc, a_list, i, tf_datatype), PARAM_INVALID,
                            "parse ge_desc failed.");
-
     uint32_t size_type = 1;
     int64_t tmp_dim = 0;
-
     auto data_type = ge_desc.GetDataType();
     GE_CHK_BOOL_RET_STATUS(ge::TypeUtils::GetDataTypeLength(data_type, size_type), PARAM_INVALID,
                            "dataType no define size , parse ge_desc failed.");
@@ -285,7 +284,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
                     ge_desc.SetShape(ge::GeShape(data_dim)); break;);
   }
   ge::TensorUtils::SetRealDimCnt(ge_desc, ge_desc.GetShape().GetDimNum());
-
   GELOGD("IO:%d: after translate tf_desc, datatype: %s, format: %s, size_type: %u", io,
          ge::TypeUtils::DataTypeToSerialString(ge_desc.GetDataType()).c_str(),
          ge::TypeUtils::FormatToSerialString(ge_desc.GetFormat()).c_str(), size_type);
@@ -303,7 +301,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
   return SUCCESS;
 }
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void TensorFlowUtil::AddNodeAttr(
-    const std::string &attr_name, const domi::tensorflow::AttrValue &value, domi::tensorflow::NodeDef *node_def) {
+    const std::string &attr_name, const domi::tensorflow::AttrValue &value, domi::tensorflow::NodeDef *const node_def) {
   GE_CHK_BOOL_TRUE_EXEC_INFO(node_def == nullptr, return, "input parameter is null.");
   node_def->mutable_attr()->insert(AttrValueMap::value_type(attr_name, value));
 }


+3 -3   parser/tensorflow/tensorflow_util.h

@@ -143,7 +143,7 @@ class TensorFlowUtil {
   * @return false attribute does not exist
   *
   */
-  static bool FindAttrValue(const domi::tensorflow::NodeDef *node_def, const std::string &attr_name,
+  static bool FindAttrValue(const domi::tensorflow::NodeDef *const node_def, const std::string &attr_name,
                             domi::tensorflow::AttrValue &attr_value);
 
   /**
@@ -181,7 +181,7 @@ class TensorFlowUtil {
   *
   */
   static domi::Status TransTensorDescriptor(const domi::tensorflow::AttrValue &attr_value,
-                                            ParserOperator *op,
+                                            ParserOperator *const op,
                                             const uint32_t io,
                                             const std::string &type = "");
   /*
@@ -194,7 +194,7 @@ class TensorFlowUtil {
   */
   static void AddNodeAttr(const std::string &attr_name,
                           const domi::tensorflow::AttrValue &value,
-                          domi::tensorflow::NodeDef *node_def);
+                          domi::tensorflow::NodeDef *const node_def);
 
   static domi::Status ClearUnusedParam(ge::ComputeGraphPtr &graph);
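Several hunks in this commit add a top-level const to pointer parameters (NodeDef *const, ParserOperator *const). A minimal sketch of what that qualifier does, on a hypothetical struct rather than the real protobuf type:

#include <iostream>
#include <string>

// Hypothetical stand-in for the protobuf NodeDef, for illustration only.
struct NodeDef { std::string name; };

// Top-level const on the pointer: the parameter itself cannot be rebound inside the
// body, which is the pattern static checkers ask for when a pointer parameter is
// never reassigned. The pointee stays mutable because NodeDef is not const-qualified.
void RenameNode(NodeDef *const node_def, const std::string &new_name) {
  node_def->name = new_name;  // allowed: modifies the pointee
  // node_def = nullptr;      // would not compile: the pointer itself is const
}

int main() {
  NodeDef node{"old_name"};
  RenameNode(&node, "new_name");
  std::cout << node.name << "\n";  // prints: new_name
  return 0;
}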



